From 14632dbfc02f14109f6ca7183039a740babeb705 Mon Sep 17 00:00:00 2001 From: Hoonmin Kim Date: Tue, 29 Apr 2014 18:58:18 +0900 Subject: [PATCH] Initial commit for open source Arcus Java client. This is the Java client for Arcus, based on spymemcached. --- .gitignore | 7 + AUTHORS | 5 + LICENSE | 202 + LICENSE.txt-spymemcached | 19 + PATENTS | 7 + README.md | 148 + buildfile | 86 + docs/arcus-java-client-getting-started.md | 303 ++ etc/MemcachedOptimization.graffle | Bin 0 -> 2554 bytes pom.xml | 173 + src/main/java/net/spy/memcached/AddrUtil.java | 52 + .../java/net/spy/memcached/ArcusClient.java | 3886 +++++++++++++++++ .../spy/memcached/ArcusClientException.java | 39 + .../java/net/spy/memcached/ArcusClientIF.java | 1696 +++++++ .../net/spy/memcached/ArcusClientPool.java | 1013 +++++ .../spy/memcached/ArcusKetamaNodeLocator.java | 238 + .../net/spy/memcached/ArcusMBeanServer.java | 62 + .../spy/memcached/ArrayModNodeLocator.java | 125 + .../java/net/spy/memcached/BaseCacheMap.java | 126 + .../memcached/BinaryConnectionFactory.java | 57 + .../net/spy/memcached/BroadcastOpFactory.java | 17 + .../java/net/spy/memcached/BulkService.java | 278 ++ .../java/net/spy/memcached/CASMutation.java | 15 + .../java/net/spy/memcached/CASMutator.java | 129 + .../java/net/spy/memcached/CASResponse.java | 23 + src/main/java/net/spy/memcached/CASValue.java | 41 + .../java/net/spy/memcached/CacheManager.java | 362 ++ src/main/java/net/spy/memcached/CacheMap.java | 40 + .../java/net/spy/memcached/CacheMonitor.java | 203 + .../java/net/spy/memcached/CachedData.java | 58 + .../CollectionOperationException.java | 30 + .../net/spy/memcached/ConnectionFactory.java | 197 + .../memcached/ConnectionFactoryBuilder.java | 505 +++ .../net/spy/memcached/ConnectionObserver.java | 27 + .../memcached/DefaultConnectionFactory.java | 407 ++ .../java/net/spy/memcached/FailureMode.java | 34 + .../java/net/spy/memcached/HashAlgorithm.java | 147 + .../memcached/KetamaConnectionFactory.java | 46 + 
.../net/spy/memcached/KetamaNodeLocator.java | 191 + src/main/java/net/spy/memcached/KeyUtil.java | 39 + .../net/spy/memcached/MemcachedClient.java | 1874 ++++++++ .../net/spy/memcached/MemcachedClientIF.java | 157 + .../spy/memcached/MemcachedConnection.java | 839 ++++ .../java/net/spy/memcached/MemcachedNode.java | 233 + .../spy/memcached/MemcachedNodeROImpl.java | 188 + .../java/net/spy/memcached/NodeLocator.java | 60 + .../NotExistsServiceCodeException.java | 27 + .../net/spy/memcached/OperationFactory.java | 473 ++ .../memcached/OperationTimeoutException.java | 37 + .../net/spy/memcached/PartitionedList.java | 57 + .../net/spy/memcached/PartitionedMap.java | 83 + .../net/spy/memcached/StatisticsHandler.java | 198 + .../spy/memcached/auth/AuthDescriptor.java | 59 + .../net/spy/memcached/auth/AuthThread.java | 104 + .../spy/memcached/auth/AuthThreadMonitor.java | 56 + .../memcached/auth/PlainCallbackHandler.java | 48 + .../java/net/spy/memcached/auth/package.html | 17 + .../spy/memcached/collection/Attributes.java | 100 + .../spy/memcached/collection/BKeyObject.java | 108 + .../spy/memcached/collection/BTreeCount.java | 68 + .../spy/memcached/collection/BTreeCreate.java | 34 + .../spy/memcached/collection/BTreeDelete.java | 134 + .../memcached/collection/BTreeElement.java | 41 + .../collection/BTreeFindPosition.java | 67 + .../spy/memcached/collection/BTreeGet.java | 157 + .../memcached/collection/BTreeGetBulk.java | 50 + .../collection/BTreeGetBulkImpl.java | 155 + .../BTreeGetBulkWithByteTypeBkey.java | 58 + .../BTreeGetBulkWithLongTypeBkey.java | 55 + .../collection/BTreeGetByPosition.java | 150 + .../memcached/collection/BTreeGetResult.java | 46 + .../spy/memcached/collection/BTreeMutate.java | 51 + .../spy/memcached/collection/BTreeOrder.java | 37 + .../spy/memcached/collection/BTreeSMGet.java | 50 + .../BTreeSMGetWithByteTypeBkey.java | 171 + .../BTreeSMGetWithLongTypeBkey.java | 174 + .../spy/memcached/collection/BTreeStore.java | 35 + 
.../collection/BTreeStoreAndGet.java | 122 + .../spy/memcached/collection/BTreeUpdate.java | 31 + .../spy/memcached/collection/BTreeUpsert.java | 36 + .../memcached/collection/ByteArrayBKey.java | 54 + .../collection/ByteArrayTreeMap.java | 38 + .../collection/CollectionAttributes.java | 216 + .../collection/CollectionBulkStore.java | 280 ++ .../memcached/collection/CollectionCount.java | 24 + .../collection/CollectionCreate.java | 71 + .../collection/CollectionDelete.java | 55 + .../memcached/collection/CollectionExist.java | 59 + .../memcached/collection/CollectionGet.java | 71 + .../collection/CollectionMutate.java | 24 + .../collection/CollectionObject.java | 45 + .../collection/CollectionOverflowAction.java | 80 + .../collection/CollectionPipedStore.java | 353 ++ .../collection/CollectionPipedUpdate.java | 176 + .../collection/CollectionResponse.java | 95 + .../memcached/collection/CollectionStore.java | 148 + .../memcached/collection/CollectionType.java | 60 + .../collection/CollectionUpdate.java | 141 + .../net/spy/memcached/collection/Element.java | 160 + .../collection/ElementFlagFilter.java | 181 + .../collection/ElementFlagUpdate.java | 111 + .../collection/ElementMultiFlagsFilter.java | 104 + .../collection/ElementValueType.java | 71 + .../collection/ExtendedBTreeGet.java | 137 + .../spy/memcached/collection/ListCreate.java | 35 + .../spy/memcached/collection/ListDelete.java | 65 + .../net/spy/memcached/collection/ListGet.java | 84 + .../spy/memcached/collection/ListStore.java | 35 + .../spy/memcached/collection/RequestMode.java | 36 + .../memcached/collection/SMGetElement.java | 118 + .../spy/memcached/collection/SetCreate.java | 35 + .../spy/memcached/collection/SetDelete.java | 72 + .../spy/memcached/collection/SetExist.java | 33 + .../net/spy/memcached/collection/SetGet.java | 70 + .../memcached/collection/SetPipedExist.java | 96 + .../spy/memcached/collection/SetStore.java | 33 + .../net/spy/memcached/compat/CloseUtil.java | 34 + 
.../net/spy/memcached/compat/SpyObject.java | 35 + .../net/spy/memcached/compat/SpyThread.java | 45 + .../net/spy/memcached/compat/SyncThread.java | 112 + .../memcached/compat/log/AbstractLogger.java | 225 + .../memcached/compat/log/DefaultLogger.java | 59 + .../net/spy/memcached/compat/log/Level.java | 39 + .../spy/memcached/compat/log/Log4JLogger.java | 79 + .../net/spy/memcached/compat/log/Logger.java | 155 + .../memcached/compat/log/LoggerFactory.java | 162 + .../spy/memcached/compat/log/SunLogger.java | 110 + .../net/spy/memcached/compat/log/package.html | 26 + .../net/spy/memcached/compat/package.html | 19 + .../internal/BTreeStoreAndGetFuture.java | 55 + .../internal/BasicThreadFactory.java | 27 + .../spy/memcached/internal/BulkFuture.java | 47 + .../spy/memcached/internal/BulkGetFuture.java | 197 + .../CheckedOperationTimeoutException.java | 85 + .../memcached/internal/CollectionFuture.java | 120 + .../internal/CollectionGetBulkFuture.java | 115 + .../net/spy/memcached/internal/GetFuture.java | 56 + .../memcached/internal/ImmediateFuture.java | 56 + .../memcached/internal/OperationFuture.java | 112 + .../spy/memcached/internal/ReconnectJob.java | 51 + .../spy/memcached/internal/SMGetFuture.java | 80 + .../SingleElementInfiniteIterator.java | 36 + .../net/spy/memcached/internal/package.html | 14 + .../ops/ArrayOperationQueueFactory.java | 31 + .../ops/BTreeFindPositionOperation.java | 29 + .../memcached/ops/BTreeGetBulkOperation.java | 24 + .../ops/BTreeGetByPositionOperation.java | 30 + .../ops/BTreeSortMergeGetOperation.java | 24 + .../ops/BTreeStoreAndGetOperation.java | 30 + .../memcached/ops/BaseOperationFactory.java | 108 + .../net/spy/memcached/ops/CASOperation.java | 38 + .../spy/memcached/ops/CASOperationStatus.java | 24 + .../ops/CancelledOperationStatus.java | 12 + .../ops/CollectionBulkStoreOperation.java | 32 + .../ops/CollectionCountOperation.java | 24 + .../ops/CollectionCreateOperation.java | 28 + .../ops/CollectionDeleteOperation.java | 28 + 
.../ops/CollectionExistOperation.java | 30 + .../memcached/ops/CollectionGetOperation.java | 32 + .../ops/CollectionMutateOperation.java | 24 + .../ops/CollectionOperationStatus.java | 46 + .../ops/CollectionPipedExistOperation.java | 32 + .../ops/CollectionPipedStoreOperation.java | 32 + .../ops/CollectionPipedUpdateOperation.java | 32 + .../ops/CollectionStoreOperation.java | 32 + .../ops/CollectionUpdateOperation.java | 32 + .../ops/CollectionUpsertOperation.java | 32 + .../memcached/ops/ConcatenationOperation.java | 28 + .../spy/memcached/ops/ConcatenationType.java | 15 + .../spy/memcached/ops/DeleteOperation.java | 9 + .../ops/ExtendedBTreeGetOperation.java | 32 + .../net/spy/memcached/ops/FlushOperation.java | 9 + .../spy/memcached/ops/GetAttrOperation.java | 37 + .../net/spy/memcached/ops/GetOperation.java | 23 + .../net/spy/memcached/ops/GetsOperation.java | 24 + .../net/spy/memcached/ops/KeyedOperation.java | 15 + .../ops/LinkedOperationQueueFactory.java | 19 + .../ops/MultiGetOperationCallback.java | 17 + .../ops/MultiGetsOperationCallback.java | 18 + .../memcached/ops/MultiOperationCallback.java | 41 + .../java/net/spy/memcached/ops/Mutator.java | 15 + .../spy/memcached/ops/MutatorOperation.java | 27 + .../net/spy/memcached/ops/NoopOperation.java | 8 + .../java/net/spy/memcached/ops/Operation.java | 83 + .../spy/memcached/ops/OperationCallback.java | 21 + .../spy/memcached/ops/OperationErrorType.java | 20 + .../spy/memcached/ops/OperationException.java | 49 + .../memcached/ops/OperationQueueFactory.java | 16 + .../net/spy/memcached/ops/OperationState.java | 39 + .../spy/memcached/ops/OperationStatus.java | 35 + .../spy/memcached/ops/SASLAuthOperation.java | 8 + .../spy/memcached/ops/SASLMechsOperation.java | 8 + .../spy/memcached/ops/SASLStepOperation.java | 8 + .../spy/memcached/ops/SetAttrOperation.java | 28 + .../net/spy/memcached/ops/StatsOperation.java | 22 + .../net/spy/memcached/ops/StoreOperation.java | 32 + 
.../java/net/spy/memcached/ops/StoreType.java | 21 + .../spy/memcached/ops/VersionOperation.java | 8 + .../java/net/spy/memcached/ops/package.html | 14 + src/main/java/net/spy/memcached/overview.html | 26 + src/main/java/net/spy/memcached/package.html | 32 + .../memcached/plugin/FrontCacheGetFuture.java | 72 + .../plugin/FrontCacheMemcachedClient.java | 102 + .../memcached/plugin/LocalCacheManager.java | 179 + .../memcached/protocol/BaseOperationImpl.java | 165 + .../protocol/GetCallbackWrapper.java | 48 + .../spy/memcached/protocol/ProxyCallback.java | 65 + .../protocol/TCPMemcachedNodeImpl.java | 565 +++ .../ascii/AsciiMemcachedNodeImpl.java | 52 + .../protocol/ascii/AsciiOperationFactory.java | 295 ++ .../ascii/BTreeFindPositionOperationImpl.java | 134 + .../ascii/BTreeGetBulkOperationImpl.java | 251 ++ .../BTreeGetByPositionOperationImpl.java | 261 ++ .../ascii/BTreeSortMergeGetOperationImpl.java | 305 ++ .../ascii/BTreeStoreAndGetOperationImpl.java | 295 ++ .../protocol/ascii/BaseGetOpImpl.java | 153 + .../ascii/BaseStoreOperationImpl.java | 100 + .../protocol/ascii/CASOperationImpl.java | 99 + .../CollectionBulkStoreOperationImpl.java | 135 + .../ascii/CollectionCountOperationImpl.java | 111 + .../ascii/CollectionCreateOperationImpl.java | 99 + .../ascii/CollectionDeleteOperationImpl.java | 117 + .../ascii/CollectionExistOperationImpl.java | 119 + .../ascii/CollectionGetOperationImpl.java | 236 + .../ascii/CollectionMutateOperationImpl.java | 114 + .../CollectionPipedExistOperationImpl.java | 133 + .../CollectionPipedStoreOperationImpl.java | 134 + .../CollectionPipedUpdateOperationImpl.java | 136 + .../ascii/CollectionStoreOperationImpl.java | 129 + .../ascii/CollectionUpdateOperationImpl.java | 137 + .../ascii/CollectionUpsertOperationImpl.java | 134 + .../ascii/ConcatenationOperationImpl.java | 30 + .../protocol/ascii/DeleteOperationImpl.java | 55 + .../ascii/ExtendedBTreeGetOperationImpl.java | 238 + .../ascii/FlushByPrefixOperationImpl.java | 70 + 
.../protocol/ascii/FlushOperationImpl.java | 49 + .../protocol/ascii/GetAttrOperationImpl.java | 105 + .../protocol/ascii/GetOperationImpl.java | 26 + .../protocol/ascii/GetsOperationImpl.java | 18 + .../protocol/ascii/MutatorOperationImpl.java | 111 + .../protocol/ascii/OperationImpl.java | 160 + .../protocol/ascii/OperationReadType.java | 15 + .../protocol/ascii/OptimizedGetImpl.java | 31 + .../protocol/ascii/SetAttrOperationImpl.java | 100 + .../protocol/ascii/StatsOperationImpl.java | 72 + .../protocol/ascii/StoreOperationImpl.java | 28 + .../protocol/ascii/VersionOperationImpl.java | 38 + .../spy/memcached/protocol/ascii/package.html | 14 + .../binary/BinaryMemcachedNodeImpl.java | 89 + .../binary/BinaryOperationFactory.java | 339 ++ .../binary/ConcatenationOperationImpl.java | 80 + .../protocol/binary/DeleteOperationImpl.java | 42 + .../protocol/binary/FlushOperationImpl.java | 25 + .../protocol/binary/GetOperationImpl.java | 62 + .../binary/MultiGetOperationImpl.java | 116 + .../protocol/binary/MutatorOperationImpl.java | 80 + .../protocol/binary/NoopOperationImpl.java | 22 + .../protocol/binary/OperationImpl.java | 295 ++ .../protocol/binary/OptimizedGetImpl.java | 34 + .../protocol/binary/OptimizedSetImpl.java | 171 + .../binary/SASLAuthOperationImpl.java | 29 + .../binary/SASLBaseOperationImpl.java | 73 + .../binary/SASLMechsOperationImpl.java | 27 + .../binary/SASLStepOperationImpl.java | 27 + .../protocol/binary/StatsOperationImpl.java | 41 + .../protocol/binary/StoreOperationImpl.java | 103 + .../protocol/binary/VersionOperationImpl.java | 25 + .../memcached/protocol/binary/package.html | 14 + .../net/spy/memcached/protocol/package.html | 14 + .../BaseSerializingTranscoder.java | 212 + .../transcoders/CollectionTranscoder.java | 161 + .../InspectObjectSizeTranscoder.java | 41 + .../transcoders/IntegerTranscoder.java | 38 + .../memcached/transcoders/LongTranscoder.java | 40 + .../transcoders/SerializingTranscoder.java | 171 + 
.../transcoders/TranscodeService.java | 94 + .../spy/memcached/transcoders/Transcoder.java | 39 + .../transcoders/TranscoderUtils.java | 88 + .../transcoders/WhalinTranscoder.java | 190 + .../transcoders/WhalinV1Transcoder.java | 284 ++ .../spy/memcached/transcoders/package.html | 14 + .../ArcusKetamaNodeLocatorConfiguration.java | 32 + .../net/spy/memcached/util/BTreeUtil.java | 78 + .../net/spy/memcached/util/CacheLoader.java | 147 + ...DefaultKetamaNodeLocatorConfiguration.java | 95 + .../util/KetamaNodeLocatorConfiguration.java | 30 + .../java/net/spy/memcached/util/package.html | 18 + .../memcached/AbstractNodeLocationCase.java | 62 + .../java/net/spy/memcached/AddrUtilTest.java | 124 + .../memcached/ArcusKetamaNodeLocatorTest.java | 3423 +++++++++++++++ .../memcached/ArrayModNodeLocatorTest.java | 70 + .../spy/memcached/AsciiCancellationTest.java | 8 + .../net/spy/memcached/AsciiClientTest.java | 40 + .../spy/memcached/AsciiIPV6ClientTest.java | 19 + .../spy/memcached/BinaryCancellationTest.java | 18 + .../net/spy/memcached/BinaryClientTest.java | 65 + .../spy/memcached/BinaryIPV6ClientTest.java | 19 + .../net/spy/memcached/CASMutatorTest.java | 69 + .../java/net/spy/memcached/CacheMapTest.java | 149 + .../net/spy/memcached/CacheMonitorTest.java | 166 + .../spy/memcached/CancelFailureModeTest.java | 51 + .../spy/memcached/CancellationBaseCase.java | 110 + .../net/spy/memcached/ClientBaseCase.java | 312 ++ .../ConnectionFactoryBuilderTest.java | 204 + .../spy/memcached/ConnectionFactoryTest.java | 33 + .../spy/memcached/ConsistentHashingTest.java | 128 + .../java/net/spy/memcached/DoLotsOfSets.java | 35 + .../net/spy/memcached/HashAlgorithmTest.java | 125 + .../KetamaConnectionFactoryTest.java | 25 + .../spy/memcached/KetamaNodeLocatorTest.java | 3407 +++++++++++++++ .../net/spy/memcached/LongClientTest.java | 70 + .../MemcachedClientConstructorTest.java | 167 + .../memcached/MemcachedConnectionTest.java | 134 + .../memcached/MemcachedNodeROImplTest.java | 63 
+ .../net/spy/memcached/MockMemcachedNode.java | 149 + .../java/net/spy/memcached/ObserverTest.java | 71 + .../memcached/OperationFactoryTestBase.java | 256 ++ .../net/spy/memcached/ProtocolBaseCase.java | 863 ++++ .../net/spy/memcached/QueueOverflowTest.java | 131 + .../RedistributeFailureModeTest.java | 69 + .../java/net/spy/memcached/TimeoutTest.java | 70 + .../spy/memcached/compat/BaseMockCase.java | 14 + .../spy/memcached/compat/log/LoggingTest.java | 147 + .../CheckedOperationTimeoutExceptionTest.java | 84 + .../SingleElementInfiniteIteratorTest.java | 31 + .../memcached/protocol/ascii/BaseOpTest.java | 138 + .../ascii/ExtensibleOperationImpl.java | 18 + .../ascii/OperationExceptionTest.java | 41 + .../protocol/ascii/OperationFactoryTest.java | 49 + .../protocol/binary/OperationFactoryTest.java | 13 + .../protocol/binary/OperatonTest.java | 38 + .../BaseSerializingTranscoderTest.java | 150 + .../transcoders/BaseTranscoderCase.java | 181 + .../memcached/transcoders/CachedDataTest.java | 21 + .../transcoders/IntegerTranscoderTest.java | 28 + .../transcoders/LongTranscoderTest.java | 28 + .../SerializingTranscoderTest.java | 112 + .../transcoders/TranscodeServiceTest.java | 59 + .../transcoders/TranscoderUtilsTest.java | 65 + .../transcoders/WhalinTranscoderTest.java | 111 + .../transcoders/WhalinV1TranscoderTest.java | 31 + .../net/spy/memcached/util/BTreeUtilTest.java | 80 + .../spy/memcached/util/CacheLoaderTest.java | 88 + .../spy/memcached/ArcusClientConnectTest.java | 41 + .../memcached/ArcusClientFrontCacheTest.java | 70 + .../ArcusClientNotExistsServiceCodeTest.java | 48 + .../ArcusClientPoolReconnectTest.java | 55 + .../ArcusClientPoolShutdownTest.java | 92 + .../memcached/ArcusClientReconnectTest.java | 59 + .../memcached/ArcusClientShutdownTest.java | 90 + .../ByteArrayBKeySMGetErrorTest.java | 272 ++ .../ByteArrayBKeySMGetIrregularEflagTest.java | 76 + .../btreesmget/ByteArrayBKeySMGetTest.java | 482 ++ .../memcached/btreesmget/SMGetErrorTest.java | 
359 ++ .../spy/memcached/btreesmget/SMGetTest.java | 460 ++ .../SMGetTestWithCombinationEflag.java | 488 +++ .../btreesmget/SMGetTestWithEflag.java | 450 ++ .../BopInsertBulkMultipleBoundaryTest.java | 71 + .../BopInsertBulkMultipleTest.java | 257 ++ .../bulkoperation/BopInsertBulkTest.java | 408 ++ .../bulkoperation/BopPipeUpdateTest.java | 279 ++ .../memcached/bulkoperation/BulkSetTest.java | 278 ++ .../bulkoperation/BulkSetVariousTypeTest.java | 85 + .../LopInsertBulkMultipleValueTest.java | 239 + .../bulkoperation/LopInsertBulkTest.java | 143 + .../bulkoperation/PipeInsertTest.java | 153 + .../bulkoperation/SetBulkTimeoutTest.java | 63 + .../SopInsertBulkMultipleValueTest.java | 241 + .../bulkoperation/SopInsertBulkTest.java | 153 + .../collection/BaseIntegrationTest.java | 167 + .../collection/CollectionFutureTest.java | 96 + .../collection/CollectionMaxElementSize.java | 51 + .../collection/ElementFlagFilterTest.java | 98 + .../attribute/BTreeGetAttrTest.java | 188 + .../collection/attribute/GetAttrTest.java | 97 + .../attribute/MaxBkeyRangeTest.java | 142 + .../collection/attribute/SetAttrTest.java | 27 + .../attribute/UnReadableBTreeTest.java | 130 + .../UnReadableExtendedBTreeTest.java | 137 + .../attribute/UnReadableListTest.java | 126 + .../attribute/UnReadableSetTest.java | 127 + .../collection/btree/BopDeleteTest.java | 109 + .../collection/btree/BopFindPositionTest.java | 185 + .../collection/btree/BopGetBulkTest.java | 382 ++ .../btree/BopGetByPositionTest.java | 437 ++ .../collection/btree/BopGetExceptionTest.java | 72 + .../btree/BopGetIrregularEflagTest.java | 139 + .../btree/BopGetOffsetSupportTest.java | 105 + .../collection/btree/BopGetSortTest.java | 114 + .../BopInsertAndGetWithElementFlagTest.java | 179 + .../btree/BopInsertWhenKeyExists.java | 142 + .../btree/BopInsertWhenKeyNotExist.java | 115 + .../collection/btree/BopMutateTest.java | 106 + .../btree/BopOverflowActionTest.java | 245 ++ .../btree/BopServerMessageTest.java | 224 + 
.../collection/btree/BopStoreAndGetTest.java | 485 ++ .../collection/btree/BopUpdateTest.java | 307 ++ .../collection/btree/BopUpsertTest.java | 51 + .../BopCountWithElementFlagFilterTest.java | 192 + .../btree/longbkey/BopGetBulkTest.java | 397 ++ .../longbkey/BopGetIrregularEflagTest.java | 149 + .../collection/btree/longbkey/BopGetTest.java | 67 + .../BopInsertAndGetWithElementFlagTest.java | 123 + .../btree/longbkey/BopUpdateTest.java | 161 + .../btree/longbkey/BopUpsertTest.java | 139 + .../collection/list/LopBulkAPITest.java | 122 + .../collection/list/LopDeleteTest.java | 85 + .../memcached/collection/list/LopGetTest.java | 104 + .../collection/list/LopInsertBoundary.java | 232 + .../collection/list/LopInsertDataType.java | 120 + .../list/LopInsertWhenKeyExists.java | 92 + .../list/LopInsertWhenKeyNotExist.java | 149 + .../list/LopOverflowActionTest.java | 217 + .../collection/list/LopServerMessageTest.java | 199 + .../collection/set/SopBulkAPITest.java | 123 + .../collection/set/SopDeleteTest.java | 67 + .../collection/set/SopExistTest.java | 78 + .../set/SopInsertWhenKeyExists.java | 86 + .../set/SopInsertWhenKeyNotExist.java | 109 + .../collection/set/SopOverflowActionTest.java | 105 + .../collection/set/SopPipedExistTest.java | 258 ++ .../collection/set/SopServerMessageTest.java | 184 + .../BTreeDeleteWithFilterTest.java | 97 + .../BTreeGetWithFilterTest.java | 195 + .../emptycollection/CreateEmptyBTreeTest.java | 87 + .../emptycollection/CreateEmptyListTest.java | 88 + .../emptycollection/CreateEmptySetTest.java | 88 + .../emptycollection/GetCountBTreeTest.java | 246 ++ ...etCountBTreeTestWithElementFlagFilter.java | 255 ++ .../emptycollection/GetWithDropBTreeTest.java | 121 + .../emptycollection/GetWithDropListTest.java | 113 + .../emptycollection/GetWithDropSetTest.java | 108 + .../InsertBTreeWithAttrAndEFlagTest.java | 154 + .../InsertListWithAttrTest.java | 96 + .../InsertSetWithAttrTest.java | 93 + .../PipedBulkInsertBTreeWithAttrTest.java | 213 + 
.../PipedBulkInsertListWithAttrTest.java | 176 + .../PipedBulkInsertSetWithAttrTest.java | 176 + .../ProtocolBTreeDeleteTest.java | 57 + .../emptycollection/ProtocolBTreeGetTest.java | 60 + .../ProtocolListDeleteTest.java | 54 + .../emptycollection/ProtocolListGetTest.java | 57 + .../ProtocolSetDeleteTest.java | 52 + .../emptycollection/ProtocolSetGetTest.java | 44 + .../emptycollection/VariousTypeTest.java | 363 ++ .../flushbyprefix/FlushByPrefixTest.java | 139 + .../frontcache/LocalCacheManagerTest.java | 228 + .../net/spy/memcached/test/AuthTest.java | 58 + .../test/ExcessivelyLargeGetTest.java | 67 + .../net/spy/memcached/test/LoaderTest.java | 64 + .../memcached/test/MemcachedThreadBench.java | 199 + .../spy/memcached/test/MemoryFullTest.java | 56 + .../memcached/test/MultiNodeFailureTest.java | 27 + .../memcached/test/MutateWithDefaultTest.java | 118 + .../net/spy/memcached/test/ObserverToy.java | 52 + .../memcached/test/SASLConnectReconnect.java | 148 + src/test/resources/log4j.xml | 85 + xdocs/index.xml | 24 + 458 files changed, 64441 insertions(+) create mode 100644 .gitignore create mode 100644 AUTHORS create mode 100644 LICENSE create mode 100644 LICENSE.txt-spymemcached create mode 100644 PATENTS create mode 100644 README.md create mode 100644 buildfile create mode 100644 docs/arcus-java-client-getting-started.md create mode 100644 etc/MemcachedOptimization.graffle create mode 100644 pom.xml create mode 100644 src/main/java/net/spy/memcached/AddrUtil.java create mode 100644 src/main/java/net/spy/memcached/ArcusClient.java create mode 100644 src/main/java/net/spy/memcached/ArcusClientException.java create mode 100644 src/main/java/net/spy/memcached/ArcusClientIF.java create mode 100644 src/main/java/net/spy/memcached/ArcusClientPool.java create mode 100644 src/main/java/net/spy/memcached/ArcusKetamaNodeLocator.java create mode 100644 src/main/java/net/spy/memcached/ArcusMBeanServer.java create mode 100644 
src/main/java/net/spy/memcached/ArrayModNodeLocator.java create mode 100644 src/main/java/net/spy/memcached/BaseCacheMap.java create mode 100644 src/main/java/net/spy/memcached/BinaryConnectionFactory.java create mode 100644 src/main/java/net/spy/memcached/BroadcastOpFactory.java create mode 100644 src/main/java/net/spy/memcached/BulkService.java create mode 100644 src/main/java/net/spy/memcached/CASMutation.java create mode 100644 src/main/java/net/spy/memcached/CASMutator.java create mode 100644 src/main/java/net/spy/memcached/CASResponse.java create mode 100644 src/main/java/net/spy/memcached/CASValue.java create mode 100644 src/main/java/net/spy/memcached/CacheManager.java create mode 100644 src/main/java/net/spy/memcached/CacheMap.java create mode 100644 src/main/java/net/spy/memcached/CacheMonitor.java create mode 100644 src/main/java/net/spy/memcached/CachedData.java create mode 100644 src/main/java/net/spy/memcached/CollectionOperationException.java create mode 100644 src/main/java/net/spy/memcached/ConnectionFactory.java create mode 100644 src/main/java/net/spy/memcached/ConnectionFactoryBuilder.java create mode 100644 src/main/java/net/spy/memcached/ConnectionObserver.java create mode 100644 src/main/java/net/spy/memcached/DefaultConnectionFactory.java create mode 100644 src/main/java/net/spy/memcached/FailureMode.java create mode 100644 src/main/java/net/spy/memcached/HashAlgorithm.java create mode 100644 src/main/java/net/spy/memcached/KetamaConnectionFactory.java create mode 100644 src/main/java/net/spy/memcached/KetamaNodeLocator.java create mode 100644 src/main/java/net/spy/memcached/KeyUtil.java create mode 100644 src/main/java/net/spy/memcached/MemcachedClient.java create mode 100644 src/main/java/net/spy/memcached/MemcachedClientIF.java create mode 100644 src/main/java/net/spy/memcached/MemcachedConnection.java create mode 100644 src/main/java/net/spy/memcached/MemcachedNode.java create mode 100644 
src/main/java/net/spy/memcached/MemcachedNodeROImpl.java create mode 100644 src/main/java/net/spy/memcached/NodeLocator.java create mode 100644 src/main/java/net/spy/memcached/NotExistsServiceCodeException.java create mode 100644 src/main/java/net/spy/memcached/OperationFactory.java create mode 100755 src/main/java/net/spy/memcached/OperationTimeoutException.java create mode 100644 src/main/java/net/spy/memcached/PartitionedList.java create mode 100644 src/main/java/net/spy/memcached/PartitionedMap.java create mode 100644 src/main/java/net/spy/memcached/StatisticsHandler.java create mode 100644 src/main/java/net/spy/memcached/auth/AuthDescriptor.java create mode 100644 src/main/java/net/spy/memcached/auth/AuthThread.java create mode 100644 src/main/java/net/spy/memcached/auth/AuthThreadMonitor.java create mode 100644 src/main/java/net/spy/memcached/auth/PlainCallbackHandler.java create mode 100644 src/main/java/net/spy/memcached/auth/package.html create mode 100644 src/main/java/net/spy/memcached/collection/Attributes.java create mode 100644 src/main/java/net/spy/memcached/collection/BKeyObject.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeCount.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeCreate.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeDelete.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeElement.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeFindPosition.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGet.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGetBulk.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGetBulkImpl.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGetBulkWithByteTypeBkey.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGetBulkWithLongTypeBkey.java create mode 100644 
src/main/java/net/spy/memcached/collection/BTreeGetByPosition.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeGetResult.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeMutate.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeOrder.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeSMGet.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeSMGetWithByteTypeBkey.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeSMGetWithLongTypeBkey.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeStore.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeStoreAndGet.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeUpdate.java create mode 100644 src/main/java/net/spy/memcached/collection/BTreeUpsert.java create mode 100644 src/main/java/net/spy/memcached/collection/ByteArrayBKey.java create mode 100644 src/main/java/net/spy/memcached/collection/ByteArrayTreeMap.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionAttributes.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionBulkStore.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionCount.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionCreate.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionDelete.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionExist.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionGet.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionMutate.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionObject.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionOverflowAction.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionPipedStore.java 
create mode 100644 src/main/java/net/spy/memcached/collection/CollectionPipedUpdate.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionResponse.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionStore.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionType.java create mode 100644 src/main/java/net/spy/memcached/collection/CollectionUpdate.java create mode 100644 src/main/java/net/spy/memcached/collection/Element.java create mode 100644 src/main/java/net/spy/memcached/collection/ElementFlagFilter.java create mode 100644 src/main/java/net/spy/memcached/collection/ElementFlagUpdate.java create mode 100644 src/main/java/net/spy/memcached/collection/ElementMultiFlagsFilter.java create mode 100644 src/main/java/net/spy/memcached/collection/ElementValueType.java create mode 100644 src/main/java/net/spy/memcached/collection/ExtendedBTreeGet.java create mode 100644 src/main/java/net/spy/memcached/collection/ListCreate.java create mode 100644 src/main/java/net/spy/memcached/collection/ListDelete.java create mode 100644 src/main/java/net/spy/memcached/collection/ListGet.java create mode 100644 src/main/java/net/spy/memcached/collection/ListStore.java create mode 100644 src/main/java/net/spy/memcached/collection/RequestMode.java create mode 100644 src/main/java/net/spy/memcached/collection/SMGetElement.java create mode 100644 src/main/java/net/spy/memcached/collection/SetCreate.java create mode 100644 src/main/java/net/spy/memcached/collection/SetDelete.java create mode 100644 src/main/java/net/spy/memcached/collection/SetExist.java create mode 100644 src/main/java/net/spy/memcached/collection/SetGet.java create mode 100644 src/main/java/net/spy/memcached/collection/SetPipedExist.java create mode 100644 src/main/java/net/spy/memcached/collection/SetStore.java create mode 100644 src/main/java/net/spy/memcached/compat/CloseUtil.java create mode 100644 
src/main/java/net/spy/memcached/compat/SpyObject.java create mode 100644 src/main/java/net/spy/memcached/compat/SpyThread.java create mode 100644 src/main/java/net/spy/memcached/compat/SyncThread.java create mode 100644 src/main/java/net/spy/memcached/compat/log/AbstractLogger.java create mode 100644 src/main/java/net/spy/memcached/compat/log/DefaultLogger.java create mode 100644 src/main/java/net/spy/memcached/compat/log/Level.java create mode 100644 src/main/java/net/spy/memcached/compat/log/Log4JLogger.java create mode 100644 src/main/java/net/spy/memcached/compat/log/Logger.java create mode 100644 src/main/java/net/spy/memcached/compat/log/LoggerFactory.java create mode 100644 src/main/java/net/spy/memcached/compat/log/SunLogger.java create mode 100644 src/main/java/net/spy/memcached/compat/log/package.html create mode 100644 src/main/java/net/spy/memcached/compat/package.html create mode 100644 src/main/java/net/spy/memcached/internal/BTreeStoreAndGetFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/BasicThreadFactory.java create mode 100644 src/main/java/net/spy/memcached/internal/BulkFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/BulkGetFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/CheckedOperationTimeoutException.java create mode 100644 src/main/java/net/spy/memcached/internal/CollectionFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/CollectionGetBulkFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/GetFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/ImmediateFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/OperationFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/ReconnectJob.java create mode 100644 src/main/java/net/spy/memcached/internal/SMGetFuture.java create mode 100644 src/main/java/net/spy/memcached/internal/SingleElementInfiniteIterator.java create mode 100644 
src/main/java/net/spy/memcached/internal/package.html create mode 100644 src/main/java/net/spy/memcached/ops/ArrayOperationQueueFactory.java create mode 100644 src/main/java/net/spy/memcached/ops/BTreeFindPositionOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/BTreeGetBulkOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/BTreeGetByPositionOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/BTreeSortMergeGetOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/BTreeStoreAndGetOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/BaseOperationFactory.java create mode 100644 src/main/java/net/spy/memcached/ops/CASOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CASOperationStatus.java create mode 100644 src/main/java/net/spy/memcached/ops/CancelledOperationStatus.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionBulkStoreOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionCountOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionCreateOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionDeleteOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionExistOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionGetOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionMutateOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionOperationStatus.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionPipedExistOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionPipedStoreOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionPipedUpdateOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionStoreOperation.java create mode 100644 
src/main/java/net/spy/memcached/ops/CollectionUpdateOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/CollectionUpsertOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/ConcatenationOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/ConcatenationType.java create mode 100644 src/main/java/net/spy/memcached/ops/DeleteOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/ExtendedBTreeGetOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/FlushOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/GetAttrOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/GetOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/GetsOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/KeyedOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/LinkedOperationQueueFactory.java create mode 100644 src/main/java/net/spy/memcached/ops/MultiGetOperationCallback.java create mode 100644 src/main/java/net/spy/memcached/ops/MultiGetsOperationCallback.java create mode 100644 src/main/java/net/spy/memcached/ops/MultiOperationCallback.java create mode 100644 src/main/java/net/spy/memcached/ops/Mutator.java create mode 100644 src/main/java/net/spy/memcached/ops/MutatorOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/NoopOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/Operation.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationCallback.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationErrorType.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationException.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationQueueFactory.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationState.java create mode 100644 src/main/java/net/spy/memcached/ops/OperationStatus.java create mode 100644 
src/main/java/net/spy/memcached/ops/SASLAuthOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/SASLMechsOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/SASLStepOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/SetAttrOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/StatsOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/StoreOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/StoreType.java create mode 100644 src/main/java/net/spy/memcached/ops/VersionOperation.java create mode 100644 src/main/java/net/spy/memcached/ops/package.html create mode 100644 src/main/java/net/spy/memcached/overview.html create mode 100644 src/main/java/net/spy/memcached/package.html create mode 100644 src/main/java/net/spy/memcached/plugin/FrontCacheGetFuture.java create mode 100644 src/main/java/net/spy/memcached/plugin/FrontCacheMemcachedClient.java create mode 100644 src/main/java/net/spy/memcached/plugin/LocalCacheManager.java create mode 100644 src/main/java/net/spy/memcached/protocol/BaseOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/GetCallbackWrapper.java create mode 100644 src/main/java/net/spy/memcached/protocol/ProxyCallback.java create mode 100644 src/main/java/net/spy/memcached/protocol/TCPMemcachedNodeImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/AsciiMemcachedNodeImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/AsciiOperationFactory.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BTreeFindPositionOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BTreeGetBulkOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BTreeGetByPositionOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BTreeSortMergeGetOperationImpl.java create mode 100644 
src/main/java/net/spy/memcached/protocol/ascii/BTreeStoreAndGetOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BaseGetOpImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/BaseStoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CASOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionBulkStoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionCountOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionCreateOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionDeleteOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionExistOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionGetOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionMutateOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedExistOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedStoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedUpdateOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionStoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionUpdateOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/CollectionUpsertOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/ConcatenationOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/DeleteOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/ExtendedBTreeGetOperationImpl.java create mode 100644 
src/main/java/net/spy/memcached/protocol/ascii/FlushByPrefixOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/FlushOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/GetAttrOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/GetOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/GetsOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/MutatorOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/OperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/OperationReadType.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/OptimizedGetImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/SetAttrOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/StatsOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/StoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/VersionOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/ascii/package.html create mode 100644 src/main/java/net/spy/memcached/protocol/binary/BinaryMemcachedNodeImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/BinaryOperationFactory.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/ConcatenationOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/DeleteOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/FlushOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/GetOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/MultiGetOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/MutatorOperationImpl.java create mode 
100644 src/main/java/net/spy/memcached/protocol/binary/NoopOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/OperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/OptimizedGetImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/OptimizedSetImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/SASLAuthOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/SASLBaseOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/SASLMechsOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/SASLStepOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/StatsOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/StoreOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/VersionOperationImpl.java create mode 100644 src/main/java/net/spy/memcached/protocol/binary/package.html create mode 100644 src/main/java/net/spy/memcached/protocol/package.html create mode 100644 src/main/java/net/spy/memcached/transcoders/BaseSerializingTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/CollectionTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/InspectObjectSizeTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/IntegerTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/LongTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/SerializingTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/TranscodeService.java create mode 100644 src/main/java/net/spy/memcached/transcoders/Transcoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/TranscoderUtils.java create mode 100644 
src/main/java/net/spy/memcached/transcoders/WhalinTranscoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/WhalinV1Transcoder.java create mode 100644 src/main/java/net/spy/memcached/transcoders/package.html create mode 100644 src/main/java/net/spy/memcached/util/ArcusKetamaNodeLocatorConfiguration.java create mode 100644 src/main/java/net/spy/memcached/util/BTreeUtil.java create mode 100644 src/main/java/net/spy/memcached/util/CacheLoader.java create mode 100644 src/main/java/net/spy/memcached/util/DefaultKetamaNodeLocatorConfiguration.java create mode 100644 src/main/java/net/spy/memcached/util/KetamaNodeLocatorConfiguration.java create mode 100644 src/main/java/net/spy/memcached/util/package.html create mode 100644 src/test/java/net/spy/memcached/AbstractNodeLocationCase.java create mode 100644 src/test/java/net/spy/memcached/AddrUtilTest.java create mode 100644 src/test/java/net/spy/memcached/ArcusKetamaNodeLocatorTest.java create mode 100644 src/test/java/net/spy/memcached/ArrayModNodeLocatorTest.java create mode 100644 src/test/java/net/spy/memcached/AsciiCancellationTest.java create mode 100644 src/test/java/net/spy/memcached/AsciiClientTest.java create mode 100644 src/test/java/net/spy/memcached/AsciiIPV6ClientTest.java create mode 100644 src/test/java/net/spy/memcached/BinaryCancellationTest.java create mode 100644 src/test/java/net/spy/memcached/BinaryClientTest.java create mode 100644 src/test/java/net/spy/memcached/BinaryIPV6ClientTest.java create mode 100644 src/test/java/net/spy/memcached/CASMutatorTest.java create mode 100644 src/test/java/net/spy/memcached/CacheMapTest.java create mode 100644 src/test/java/net/spy/memcached/CacheMonitorTest.java create mode 100644 src/test/java/net/spy/memcached/CancelFailureModeTest.java create mode 100644 src/test/java/net/spy/memcached/CancellationBaseCase.java create mode 100644 src/test/java/net/spy/memcached/ClientBaseCase.java create mode 100644 
src/test/java/net/spy/memcached/ConnectionFactoryBuilderTest.java create mode 100644 src/test/java/net/spy/memcached/ConnectionFactoryTest.java create mode 100644 src/test/java/net/spy/memcached/ConsistentHashingTest.java create mode 100644 src/test/java/net/spy/memcached/DoLotsOfSets.java create mode 100644 src/test/java/net/spy/memcached/HashAlgorithmTest.java create mode 100644 src/test/java/net/spy/memcached/KetamaConnectionFactoryTest.java create mode 100644 src/test/java/net/spy/memcached/KetamaNodeLocatorTest.java create mode 100644 src/test/java/net/spy/memcached/LongClientTest.java create mode 100644 src/test/java/net/spy/memcached/MemcachedClientConstructorTest.java create mode 100644 src/test/java/net/spy/memcached/MemcachedConnectionTest.java create mode 100644 src/test/java/net/spy/memcached/MemcachedNodeROImplTest.java create mode 100644 src/test/java/net/spy/memcached/MockMemcachedNode.java create mode 100644 src/test/java/net/spy/memcached/ObserverTest.java create mode 100644 src/test/java/net/spy/memcached/OperationFactoryTestBase.java create mode 100644 src/test/java/net/spy/memcached/ProtocolBaseCase.java create mode 100644 src/test/java/net/spy/memcached/QueueOverflowTest.java create mode 100644 src/test/java/net/spy/memcached/RedistributeFailureModeTest.java create mode 100644 src/test/java/net/spy/memcached/TimeoutTest.java create mode 100644 src/test/java/net/spy/memcached/compat/BaseMockCase.java create mode 100644 src/test/java/net/spy/memcached/compat/log/LoggingTest.java create mode 100644 src/test/java/net/spy/memcached/internal/CheckedOperationTimeoutExceptionTest.java create mode 100644 src/test/java/net/spy/memcached/internal/SingleElementInfiniteIteratorTest.java create mode 100644 src/test/java/net/spy/memcached/protocol/ascii/BaseOpTest.java create mode 100644 src/test/java/net/spy/memcached/protocol/ascii/ExtensibleOperationImpl.java create mode 100644 src/test/java/net/spy/memcached/protocol/ascii/OperationExceptionTest.java 
create mode 100644 src/test/java/net/spy/memcached/protocol/ascii/OperationFactoryTest.java create mode 100644 src/test/java/net/spy/memcached/protocol/binary/OperationFactoryTest.java create mode 100644 src/test/java/net/spy/memcached/protocol/binary/OperatonTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/BaseSerializingTranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/BaseTranscoderCase.java create mode 100644 src/test/java/net/spy/memcached/transcoders/CachedDataTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/IntegerTranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/LongTranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/SerializingTranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/TranscodeServiceTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/TranscoderUtilsTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/WhalinTranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/transcoders/WhalinV1TranscoderTest.java create mode 100644 src/test/java/net/spy/memcached/util/BTreeUtilTest.java create mode 100644 src/test/java/net/spy/memcached/util/CacheLoaderTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientConnectTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientFrontCacheTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientNotExistsServiceCodeTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientPoolReconnectTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientPoolShutdownTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientReconnectTest.java create mode 100644 src/test/manual/net/spy/memcached/ArcusClientShutdownTest.java create mode 100644 
src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetErrorTest.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetIrregularEflagTest.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetTest.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/SMGetErrorTest.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/SMGetTest.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithCombinationEflag.java create mode 100644 src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithEflag.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleBoundaryTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BopPipeUpdateTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BulkSetTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/BulkSetVariousTypeTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkMultipleValueTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/PipeInsertTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/SetBulkTimeoutTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkMultipleValueTest.java create mode 100644 src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/BaseIntegrationTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/CollectionFutureTest.java create mode 100644 
src/test/manual/net/spy/memcached/collection/CollectionMaxElementSize.java create mode 100644 src/test/manual/net/spy/memcached/collection/ElementFlagFilterTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/BTreeGetAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/GetAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/MaxBkeyRangeTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/SetAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/UnReadableBTreeTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/UnReadableExtendedBTreeTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/UnReadableListTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/attribute/UnReadableSetTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopFindPositionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetBulkTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetByPositionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetExceptionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetIrregularEflagTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetOffsetSupportTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopGetSortTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopInsertAndGetWithElementFlagTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyExists.java create mode 100644 
src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyNotExist.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopMutateTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopOverflowActionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopServerMessageTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopStoreAndGetTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopUpdateTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/BopUpsertTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopCountWithElementFlagFilterTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetBulkTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetIrregularEflagTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopInsertAndGetWithElementFlagTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopUpdateTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/btree/longbkey/BopUpsertTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopBulkAPITest.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopGetTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopInsertBoundary.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopInsertDataType.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyExists.java create mode 100644 
src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyNotExist.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopOverflowActionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/list/LopServerMessageTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopBulkAPITest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopExistTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyExists.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyNotExist.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopOverflowActionTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopPipedExistTest.java create mode 100644 src/test/manual/net/spy/memcached/collection/set/SopServerMessageTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/BTreeDeleteWithFilterTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/BTreeGetWithFilterTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/CreateEmptyBTreeTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/CreateEmptyListTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/CreateEmptySetTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTestWithElementFlagFilter.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/GetWithDropBTreeTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/GetWithDropListTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/GetWithDropSetTest.java create mode 100644 
src/test/manual/net/spy/memcached/emptycollection/InsertBTreeWithAttrAndEFlagTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/InsertListWithAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/InsertSetWithAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertBTreeWithAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertListWithAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertSetWithAttrTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeGetTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolListDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolListGetTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolSetDeleteTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/ProtocolSetGetTest.java create mode 100644 src/test/manual/net/spy/memcached/emptycollection/VariousTypeTest.java create mode 100644 src/test/manual/net/spy/memcached/flushbyprefix/FlushByPrefixTest.java create mode 100644 src/test/manual/net/spy/memcached/frontcache/LocalCacheManagerTest.java create mode 100644 src/test/manual/net/spy/memcached/test/AuthTest.java create mode 100644 src/test/manual/net/spy/memcached/test/ExcessivelyLargeGetTest.java create mode 100644 src/test/manual/net/spy/memcached/test/LoaderTest.java create mode 100644 src/test/manual/net/spy/memcached/test/MemcachedThreadBench.java create mode 100644 src/test/manual/net/spy/memcached/test/MemoryFullTest.java create mode 100644 src/test/manual/net/spy/memcached/test/MultiNodeFailureTest.java create mode 100644 
src/test/manual/net/spy/memcached/test/MutateWithDefaultTest.java create mode 100644 src/test/manual/net/spy/memcached/test/ObserverToy.java create mode 100644 src/test/manual/net/spy/memcached/test/SASLConnectReconnect.java create mode 100644 src/test/resources/log4j.xml create mode 100644 xdocs/index.xml diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..e2edc5c0a --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.settings +cobertura.ser +target/ +reports/ +junit* +*.iml +*~ \ No newline at end of file diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 000000000..93592090c --- /dev/null +++ b/AUTHORS @@ -0,0 +1,5 @@ +Chisu Ryu (netspider) ; +Hoonmin Kim (harebox) ; +YeaSol Kim (ngleader) ; +SeongHwa Ahn ; +Hyongyoub Kim diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/LICENSE.txt-spymemcached b/LICENSE.txt-spymemcached new file mode 100644 index 000000000..010cb4a9e --- /dev/null +++ b/LICENSE.txt-spymemcached @@ -0,0 +1,19 @@ +Copyright (c) 2006-2009 Dustin Sallings + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/PATENTS b/PATENTS new file mode 100644 index 000000000..035560bfb --- /dev/null +++ b/PATENTS @@ -0,0 +1,7 @@ +Arcus has patents on the implementation of b+tree smget operation like below. + +| Nation | Number | Title | Status | +| ------------- | --------------- | ------------------- | ---------- | +| United States | 13/474,382 | Multiple Range Scan | registered | +| Korea | 10-2011-0054384 | Multiple Range Scan | applied | +| Japan | 2012-092668 | Multiple Range Scan | applied | diff --git a/README.md b/README.md new file mode 100644 index 000000000..97a2e8f10 --- /dev/null +++ b/README.md @@ -0,0 +1,148 @@ +## arcus-java-client: Arcus Java Client + +This is a fork of [spymemcached][spymemcached] with the following modifications +to support Arcus memcached cloud. + +- Collection data types + - List: A doubly-linked list. + - Set: An unordered set of unique data. + - B+Tree: A B+Tree structure similar to sorted map. +- ZooKeeper based clustering + +[spymemcached]: https://code.google.com/p/spymemcached/ "spymemcached" + +## Getting Started + +The latest artifacts would be published to maven central. +Just add arcus-java-client artifact into your pom.xml. + +```xml + + + com.navercorp + arcus-java-client + 1.7.0 + + +``` + +- [Getting Started Guide (in Korean)][getting-started-guide] + +[getting-started-guide]: http://yobi.navercorp.com/openarcus/arcus-java-client/code/master/docs/arcus-java-client-getting-started.md "guide" + +## Building + +To build your own library, simply run the following maven command: + +``` +$ mvn clean install + +# Test cases may not run properly if you do not already have memcached +# and ZooKeeper installed on the local machine. To skip tests, use skipTests. + +$ mvn clean install -DskipTests=true +``` + +## Running Test Cases + +Before running test cases, make sure to set up a local ZooKeeper and run +an Arcus memcached instance. 
Several Arcus specific test cases assume that +there is an Arcus instance running, along with ZooKeeper. + +First, make a simple ZooKeeper configuration file. By default, tests assume +ZooKeeper is running at localhost:2181. +``` +$ cat test-zk.conf +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 +# the directory where the snapshot is stored. +dataDir=/home1/openarcus/zookeeper_data +# the port at which the clients will connect +clientPort=2181 +maxClientCnxns=200 +``` + +Second, create znodes for one memcached instance running at localhost:11211. +ZooKeeper comes with a command line tool. The following script uses it to +set up the directory structure. +``` +$ cat setup-test-zk.bash + +ZK_CLI="./zookeeper/bin/zkCli.sh" +ZK_ADDR="-server localhost:2181" + +$ZK_CLI $ZK_ADDR create /arcus 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_list 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_list/test 0 +$ZK_CLI $ZK_ADDR create /arcus/client_list 0 +$ZK_CLI $ZK_ADDR create /arcus/client_list/test 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_mapping 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_log 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_mapping/127.0.0.1:11211 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_mapping/127.0.0.1:11211/test 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_mapping/127.0.0.1:11212 0 +$ZK_CLI $ZK_ADDR create /arcus/cache_server_mapping/127.0.0.1:11212/test 0 +``` + +Now start the ZooKeeper instance using the configuration above. +``` +$ ZOOCFGDIR=$PWD ./zookeeper/bin/zkServer.sh start test-zk.conf +``` + +And, start the memcached instance. +``` +$ /home1/openarcus/bin/memcached -E /home1/openarcus/lib/default_engine.so -p 11211 -z localhost:2181 +``` + +Finally, run test cases. +``` +$ mvn test +[...] 
+Results : + +Tests run: 722, Failures: 0, Errors: 0, Skipped: 8 + +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 3:17.308s +[INFO] Finished at: Thu Mar 06 13:42:58 KST 2014 +[INFO] Final Memory: 9M/722M +[INFO] ------------------------------------------------------------------------ +``` + +## Issues + +If you find a bug, please report it via the GitHub issues page. + +https://github.com/naver/arcus-java-client/issues + +## Arcus Contributors + +In addition to those who had contributed to the original libmemcached, the +following people at NAVER have contributed to arcus-java-client. + +Chisu Ryu (netspider) ; +Hoonmin Kim (harebox) ; +YeaSol Kim (ngleader) ; +SeongHwa Ahn ; +HyongYoub Kim + +## License + +Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0 + +## Patents + +Arcus has patents on b+tree smget operation. +Refer to PATENTS file in this directory to get the patent information. + +Under the Apache License 2.0, a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable patent license is granted to any user for any usage. +You can see the specifics on the grant of patent license in LICENSE file in this directory. 
diff --git a/buildfile b/buildfile new file mode 100644 index 000000000..b9ef6b91b --- /dev/null +++ b/buildfile @@ -0,0 +1,86 @@ +# -*- mode: ruby -*- +# Generated by Buildr 1.2.10, change to your liking +# Version number for this release +VERSION_NUMBER = `git describe`.strip +# Version number for the next release +NEXT_VERSION = VERSION_NUMBER +# Group identifier for your projects +GROUP = "spy" +COPYRIGHT = "2006-2009 Dustin Sallings" + +MAVEN_1_RELEASE = true +RELEASE_REPO = 'http://bleu.west.spy.net/~dustin/repo' +PROJECT_NAME = "memcached" + +def compute_released_verions + h = {} + `git tag`.reject{|i| i =~ /pre|rc/}.map{|v| v.strip}.each do |v| + a=v.split('.') + h[a[0..1].join('.')] = v + end + require 'set' + rv = Set.new h.values + rv << VERSION_NUMBER + rv +end + +RELEASED_VERSIONS=compute_released_verions.sort.reverse + +# Specify Maven 2.0 remote repositories here, like this: +repositories.remote << "http://www.ibiblio.org/maven2/" +repositories.remote << "http://bleu.west.spy.net/~dustin/m2repo/" + +require 'buildr/java/emma' + +plugins=[ + 'spy:m1compat:rake:1.0', + 'spy:site:rake:1.2.4', + 'spy:git_tree_version:rake:1.0', + 'spy:build_info:rake:1.1.1' +] + +plugins.each do |spec| + artifact(spec).tap do |plugin| + plugin.invoke + load plugin.name + end +end + +desc "Java memcached client" +define "memcached" do + + test.options[:java_args] = "-ea" + test.include "*Test" + TREE_VER=tree_version + puts "Tree version is #{TREE_VER}" + + project.version = VERSION_NUMBER + project.group = GROUP + compile.options.target = '1.5' + manifest["Implementation-Vendor"] = COPYRIGHT + compile.with "log4j:log4j:jar:1.2.15", "jmock:jmock:jar:1.2.0", + "junit:junit:jar:4.4" + + # Gen build + gen_build_info "net.spy.memcached", "git" + compile.from "target/generated-src" + resources.from "target/generated-rsrc" + + package(:jar).with :manifest => + manifest.merge("Main-Class" => "net.spy.memcached.BuildInfo") + + package :sources + package :javadoc + 
javadoc.using(:windowtitle => "javadocs for spymemcached #{TREE_VER}", + :doctitle => "Javadocs for spymemcached #{TREE_VER}", + :use => true, + :charset => 'utf-8', + :overview => 'src/main/java/net/spy/memcached/overview.html', + :group => { 'Core' => 'net.spy.memcached' }, + :link => 'http://java.sun.com/j2se/1.5.0/docs/api/') + + emma.exclude 'net.spy.memcached.test.*' + emma.exclude 'net.spy.memcached.BuildInfo' + +end +# vim: syntax=ruby et ts=2 diff --git a/docs/arcus-java-client-getting-started.md b/docs/arcus-java-client-getting-started.md new file mode 100644 index 000000000..5de45b734 --- /dev/null +++ b/docs/arcus-java-client-getting-started.md @@ -0,0 +1,303 @@ +## 처음 사용자용 가이드 + +이 문서는 Arcus를 처음 접하는 자바 개발자를 위해 작성되었습니다. +[Apache Maven][maven]의 개념과 기본 사용법을 알고 있다고 가정하고 있으며, +자세한 설명을 하기 보다는 Copy&Paste를 통해 Arcus를 사용해볼 수 있는 내용으로 되어 있습니다. + +[maven]: http://maven.apache.org/ "Apache Maven" + +### Arcus + +Arcus는 오픈소스 key-value 캐시 서버인 memcached를 기반으로 사내에서 개발 중인, 부분적으로 fault-tolerant한 메모리 기반의 캐시 클라우드 서비스입니다. +* memcached : 구글, 페이스북 등에서 대규모로 사용하고 있는 메모리 캐시 서버입니다. +* 캐시 : 자주 사용되는 데이터를 비교적 고속의 저장소에 넣어둠으로써, 느린 저장소로의 요청을 줄이고 보다 빠른 응답성을 기대할 수 있게 하는 서비스입니다. +* 메모리 기반 : Arcus는 데이터를 메모리에만 저장합니다. 따라서 모든 데이터는 휘발성이며 언제든지 삭제될 수 있습니다. +* 클라우드 서비스 : 각 서비스는 필요에 따라 전용 캐시 클러스터를 구성할 수 있으며 동적으로 캐시 서버를 추가하거나 삭제할 수 있습니다. (단, 일부 데이터는 유실됩니다) +* fault-tolerant : Arcus는 일부 또는 전체 캐시 서버의 이상 상태를 감지하여 적절한 조치를 취합니다. + +또한 Arcus는 key-value 형태의 데이터뿐만 아니라 List, Set, B+Tree 등의 자료구조를 저장할 수 있는 기능을 제공합니다. + +### 미리 알아두기 + +- 키(key) + - Arcus의 key는 prefix와 subkey로 구성되며, prefix와 subkey는 콜론(:)으로 구분됩니다. (예) *users:user_12345* + - Arcus는 prefix를 기준으로 별도의 통계를 수집합니다. prefix 개수의 제한은 없으나 통계 수집을 하는 경우에는 너무 많지 않는 수준(5~10개)으로 생성하시는 것을 권합니다. + - 키는 prefix, subkey를 포함하여 250자를 넘을 수 없습니다. 따라서 반드시 응용에서 키 길이를 제한하셔야 합니다. +- 값(value) + - 하나의 키에 대한 값은 바이트 스트림 형태로 최대 1MB 까지 저장될 수 있습니다. + - 자바 객체를 저장하는 경우, 해당 객체는 반드시 Serializable 인터페이스를 구현해야 합니다. +- 컬렉션 자료구조(collections) + - Arcus는 List, Set, B+Tree 등의 자료구조를 제공합니다. 
하지만 Java Collection API와는 +* Arcus 접속 정보 + - Arcus admin: ZooKeeper 서버 주소로서 캐시 서버들의 IP와 PORT 정보를 조회하고 변경이 있을 때 클라이언트에게 알려주는 역할을 합니다. + - Arcus service code: 사용자 또는 서비스에게 할당된 캐시 서버들을 구분짓는 코드값입니다. + +### Hello, Arcus! + +기본적인 key-value 캐시 요청을 수행해보도록 하겠습니다. 아커스 서버가 구성되어 있다고 가정합니다. +우선 다음과 같이 비어 있는 자바 프로젝트를 생성합니다. + +```bash +$ mvn archetype:generate -DgroupId=com.navercorp.arcus -DartifactId=arcus-quick-start -DinteractiveMode=false +$ cd arcus-quick-start +$ mvn eclipse:eclipse // 이클립스 IDE를 사용하는 경우 실행하여 이클립스 프로젝트를 생성하여 활용합니다. +``` + +#### pom.xml + +프로젝트가 생성되면 Arcus 클라이언트를 pom.xml에서 참조하도록 변경합니다. + +```xml + + 4.0.0 + + com.navercorp.arcus + arcus-quick-start + 1.0-SNAPSHOT + jar + + arcus-quick-start + http://maven.apache.org + + + UTF-8 + + + + + + + + junit + junit + 4.4 + test + + + + + com.navercorp.arcus + arcus-java-client + 1.7.0 + + + + + log4j + log4j + 1.2.16 + + + org.slf4j + slf4j-api + 1.6.1 + + + org.slf4j + slf4j-log4j12 + 1.6.1 + + + +``` + +#### HelloArcus.java + +이제 Arcus와 통신하는 클래스를 생성해봅시다. +시나리오는 다음과 같습니다. +- HelloArcus.sayHello(): Arcus 캐시 서버에 "Hello, Arcus!" 값을 저장합니다. +- HelloArcus.listenHello(): Arcus 캐시 서버에 저장된 "Hello, Arcus!" 값을 읽어옵니다. 
+ +```java +// HelloArcusTest.java +package com.navercorp.arcus; + +import junit.framework.Assert; + +import org.junit.Before; +import org.junit.Test; + +public class HelloArcusTest { + + HelloArcus helloArcus = new HelloArcus("127.0.0.1:2181", "test"); + + @Before + public void sayHello() { + helloArcus.sayHello(); + } + + @Test + public void listenHello() { + Assert.assertEquals("Hello, Arcus!", helloArcus.listenHello()); + } + +} +``` + +```java +// HelloArcus.java +package com.navercorp.arcus; + +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.ArcusClient; +import net.spy.memcached.ConnectionFactoryBuilder; + +public class HelloArcus { + + private String arcusAdmin; + private String serviceCode; + private ArcusClient arcusClient; + + public HelloArcus(String arcusAdmin, String serviceCode) { + this.arcusAdmin = arcusAdmin; + this.serviceCode = serviceCode; + + // log4j logger를 사용하도록 설정합니다. + // 코드에 직접 추가하지 않고 아래의 JVM 환경변수를 사용해도 됩니다. + // -Dnet.spy.log.LoggerImpl=net.spy.memcached.compat.log.Log4JLogger + System.setProperty("net.spy.log.LoggerImpl", "net.spy.memcached.compat.log.Log4JLogger"); + + // Arcus 클라이언트 객체를 생성합니다. + // - arcusAdmin : Arcus 캐시 서버들의 그룹을 관리하는 admin 서버(ZooKeeper)의 주소입니다. + // - serviceCode : 사용자에게 할당된 Arcus 캐시 서버들의 집합에 대한 코드값입니다. + // - connectionFactoryBuilder : 클라이언트 생성 옵션을 지정할 수 있습니다. + // + // 정리하면 arcusAdmin과 serviceCode의 조합을 통해 유일한 캐시 서버들의 집합을 얻어 연결할 수 있는 것입니다. + this.arcusClient = ArcusClient.createArcusClient(arcusAdmin, serviceCode, new ConnectionFactoryBuilder()); + } + + public boolean sayHello() { + Future future = null; + boolean setSuccess = false; + + // Arcus의 "test:hello" 키에 "Hello, Arcus!"라는 값을 저장합니다. + // 그리고 Arcus의 거의 모든 API는 Future를 리턴하도록 되어 있으므로 + // 비동기 처리에 특화된 서버가 아니라면 반드시 명시적으로 future.get()을 수행하여 + // 반환되는 응답을 기다려야 합니다. 
+ future = this.arcusClient.set("test:hello", 600, "Hello, Arcus!"); + + try { + setSuccess = future.get(700L, TimeUnit.MILLISECONDS); + } catch (Exception e) { + if (future != null) future.cancel(true); + e.printStackTrace(); + } + + return setSuccess; + } + + public String listenHello() { + Future future = null; + String result = "Not OK."; + + // Arcus의 "test:hello" 키의 값을 조회합니다. + // Arcus에서는 가능한 모든 명령에 명시적으로 timeout 값을 지정하도록 가이드 하고 있으며 + // 사용자는 set을 제외한 모든 요청에 async로 시작하는 API를 사용하셔야 합니다. + future = this.arcusClient.asyncGet("test:hello"); + + try { + result = (String)future.get(700L, TimeUnit.MILLISECONDS); + } catch (Exception e) { + if (future != null) future.cancel(true); + e.printStackTrace(); + } + + return result; + } + +} +``` + +#### src/test/resources/log4j.xml +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +#### 테스트 + +테스트가 통과하는지 확인해봅니다. + +```bash +$ mvn test +... +------------------------------------------------------- + T E S T S +------------------------------------------------------- +... 
+Results : + +Tests run: 1, Failures: 0, Errors: 0, Skipped: 0 + +[INFO] ------------------------------------------------------------------------ +[INFO] BUILD SUCCESS +[INFO] ------------------------------------------------------------------------ +[INFO] Total time: 1.885s +[INFO] Finished at: Mon Dec 17 14:13:22 KST 2012 +[INFO] Final Memory: 4M/81M +[INFO] ------------------------------------------------------------------------ +``` diff --git a/etc/MemcachedOptimization.graffle b/etc/MemcachedOptimization.graffle new file mode 100644 index 0000000000000000000000000000000000000000..9356fb0ab5047956162c14580be555cc491046a4 GIT binary patch literal 2554 zcmV zYk57}9emvX+WF;B-~ah|@9010*JqtSTA!qmV+TxXeLcOrIFt4do6VEa$f44iHyn)_ z^_$J}&bidOyl8hMURbmF>1tovzh`Xpq1l{FCbAid%a%6`1%qbG_eRub(@S3JAy<%l zthdkWi>t}m#lP*@7CYW+96iwK@rlLkZ}iffQvbs3(Z{1^_$_~EyNnK~e+-VAv)h>p zuiLgv#*4g-OP(v4zHf#%jiZ41yzR%_LfIR-_Q3bXBl()&efG_M-=W;%qh=zMk%!2T zA;~;BYR<}x>mD=jr1#%(z~*%_e@vUR=Vzw-%?!>w$Mfd`NpoX?$?WU2%3N*PzKILF z_U(b4HZhTX0tXU!&Z?#wZOe4%T*0TN6IK#kXL6o19=a!vJ#b4M4oe*#OSea+#jDDf zi`7EG{6jb}=O&{?lMw*gAutY+CPDOp|5b=|2s8jntq2RrbYSxIocFrzN^R3|_{X5A zb#4bE$DHOHKkH+OD}I#1OHqyEVRV1)S>qvfSrJdOjv%EziPUT>2oW!U-)zj2203OB z%m;pM)urvyq?{}kW1U~A*~_TR3yhy$oTtxHGxeOaG*UPTwB|~hUQh}pS|0x@Yx2Bl zsrgSB1DPC1NLQb8_GctCjX%d&CWfAuN&PCRvCXFGxRo2sRlNtAZFDM-cG!vUdy|aN z+>-Xa>5r19SxaNpa3DI<5zT1|{b8=sF>R-^Hc;q}%rF#2BrIm#QHHo+Q7r5BJ)v|J zBr zKPm|9J$3t@%h;XM?SpPVnAkzk?OWW#0?IT^`jtB0DC6tNpPsp>6|UU4>`z^v_7EZ6 zflsL$-rPB3%CB6iJ2L%Vmpu{{@P8ph(M8q63qneC(Ls2@I1<4|B%&yh2mz4@*A6c# zj6_t8i~M6nMcck>vaxSEdwa3TNRhf$A4mhrq`w~jE{{vb5wrJ2G*_aq>@1b;QUX+V z?5domld5lJ)vrJVrdTY{6?v`<;n#bMN7UlH+*Tm*+LWHxmy*~BmS_z=eE0X&=JjpgDdXt z2@ve=!~O2MyH2c$cAYIRuvsR)H@6hX7#Rk^Dg?Tr0aZaTV|uK739FG0Za_{B@gE`<~Fu28U{FlE%!MKWDXi=nBd zA(91@M~JMDkn<3d4G?jjqSlv&OF6Uu@6%7*u>OkS3RBFzvIq%=GJ^L}% z!v-h2aQ|gs*?^>8>Ft2^^i8oIS)cV3Y+zm22sCu8)~r{v-v7Z$PXV&3>tP5=krBW( z=k0*=v`ulIvNq=tO;L5i*ZP=WjCyxlbKcwJyzd*{-Kt8s&cjGn4Xx(99dMqyDbB-d 
zavopYb8pcR)F2?bN=VIlHRrwmb)Kfl0E9m^0xV-xuk&`nd1Op^e^dc(())wg>-{$)T^UkpTS$l-5)m_V7RMvcSm;t;(v9FiB~5O5qC zArOHsBaPHR+%6FDI|HKf!$3qTmNirZamzqd-Wd?_kIu{p0a?MY#^H8xNZuI^(GPD` zpr*=1(`z7Z7l`WGK&*}v&c=ujDVe>FfVIuV2!+stA_^>>DHmg}d|-UhY4S4E)(%|N zah-MJIv3Yc=icWoxvALB>%YVcF_f{Q$I%xp@&pBJ@jYHuOgZ&?wcErkQ(yc_?ILU$ z28WfR%4i3$LN%;*4=eI2uu_fq9oAj26|P~mdsr#22P;L}30o;OtacA8{A!aRG_;+t z6{%shdsv}Yfz^E74i$YTl%>|#+C8@5ir8B14RRHT-ykoffc)&+z3h3WXtd@DAU}`o z9t|!%>w)%)SqO87N;1;7c3|I?lNRPrQ^oPFmN}ro3Mm^ehFtKBI}oG(D&j%QbSNw5 zY%H7n8>OR;m(Q};H=Q8Off*(943;KE4Ew0OEs) zO(#o5tB}Ag@W!kf3zxS0P&K&aj!oiVYsd$DaNGBkvZ8SODAzYdmF448m6>(HVKH%r z)JB>kwNaTRwH#ra2*PWyBCu4QFSSvfF||>dGqq95n%bz$o4O!*U1jdnMmc+GqdJLc zV@Vp*Ms-HgS!Ar470c6CBL!_kL^4a-S7MIEt}I}WMutBcA_iF!-plhy^$|W1mn>!z zF`q0=sgmcE1>%cls4};d2Z|$|y76&N!Dxwo=D8j-$98G)cbCMdQ!@~S@^8G!I*8}u zE5kjaqK?f()(A)}(faDz|BR{l7Y2brx2xf~isip}J-eUen_iaISor|e4+M+T8pj3E zb@Liud5o?;D+n+lM#M~-bLHxV3*b`)p4fGG&!6%ke+X7s+(dp>noZ}Kv#;8x9%J6{ zi|J?iJ;M#Nvipu_vOH`=KM7`hE~(&>_Sq{I-{PACKX=AlPeY{kVP|=9?kngo3 zGr}a|@uEInKCJ-zNSV+3e1B-sHFb+jxWLuF2K1B5Y@dccDNbI0yXx-ZciZiGlWJEz z8~Z-*h}_z=YSlPDhfn>Ec~{DgpHNN?G^xh#c8}esI?*U1V5HMb{3eK>o%GWn0>KB8 z5{3@cFd2ldpnUKknam?@yrF~aESr`QEh>2tBz(78E5;w9BL4KeVZp{sI{pOr={5R4 Q6OQ-(2RSPC-P2(J09H{6VE_OC literal 0 HcmV?d00001 diff --git a/pom.xml b/pom.xml new file mode 100644 index 000000000..a74caff56 --- /dev/null +++ b/pom.xml @@ -0,0 +1,173 @@ + + 4.0.0 + arcus + arcus-client + 1.7.0 + Arcus Java Client + jar + + + + log4j + log4j + 1.2.16 + provided + + + net.sf.ehcache + ehcache-core + 2.6.0 + + + org.apache.zookeeper + zookeeper + 3.4.5 + + + log4j + log4j + + + + + + + junit + junit + 4.4 + test + + + jmock + jmock + 1.2.0 + test + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + 1.6 + 1.6 + true + true + utf-8 + true + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy-dependencies + + copy-dependencies + + + jmock,junit + target + false + true + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 1.1 
+ + + add-test-source + generate-sources + + add-test-source + + + + + src/test/manual + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.7 + + + generate-java-doc + package + + + + public + false + + + + org.apache.maven.plugins + maven-source-plugin + 2.0.4 + + + attach-sources + + jar + + + + + + org.apache.maven.plugins + maven-jar-plugin + 2.3.1 + + + true + + true + true + + + ${project.version} + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + + + **/BinaryIPV6ClientTest.java + **/AsciiIPV6ClientTest.java + + **/CacheMonitorTest.java + + **/BinaryClientTest.java + + **/RedistributeFailureModeTest.java + **/LoggingTest.java + + + + + + + org.apache.maven.wagon + wagon-webdav + 1.0-beta-2 + + + + diff --git a/src/main/java/net/spy/memcached/AddrUtil.java b/src/main/java/net/spy/memcached/AddrUtil.java new file mode 100644 index 000000000..a2d33f32a --- /dev/null +++ b/src/main/java/net/spy/memcached/AddrUtil.java @@ -0,0 +1,52 @@ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + +/** + * Convenience utilities for simplifying common address parsing. + */ +public class AddrUtil { + + /** + * Split a string containing whitespace or comma separated host or + * IP addresses and port numbers of the form "host:port host2:port" + * or "host:port, host2:port" into a List of InetSocketAddress + * instances suitable for instantiating a MemcachedClient. + * + * Note that colon-delimited IPv6 is also supported. 
+ * For example: ::1:11211 + */ + public static List getAddresses(String s) { + if(s == null) { + throw new NullPointerException("Null host list"); + } + if(s.trim().equals("")) { + throw new IllegalArgumentException("No hosts in list: ``" + + s + "''"); + } + ArrayList addrs= + new ArrayList(); + + for(String hoststuff : s.split("(?:\\s|,)+")) { + if(hoststuff.equals("")) { + continue; + } + + int finalColon=hoststuff.lastIndexOf(':'); + if(finalColon < 1) { + throw new IllegalArgumentException("Invalid server ``" + + hoststuff + "'' in list: " + s); + + } + String hostPart=hoststuff.substring(0, finalColon); + String portNum=hoststuff.substring(finalColon+1); + + addrs.add(new InetSocketAddress(hostPart, + Integer.parseInt(portNum))); + } + assert !addrs.isEmpty() : "No addrs found"; + return addrs; + } +} diff --git a/src/main/java/net/spy/memcached/ArcusClient.java b/src/main/java/net/spy/memcached/ArcusClient.java new file mode 100644 index 000000000..df8d69f84 --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusClient.java @@ -0,0 +1,3886 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.URL; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.jar.JarFile; +import java.util.jar.Manifest; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BKeyObject; +import net.spy.memcached.collection.BTreeCount; +import net.spy.memcached.collection.BTreeCreate; +import net.spy.memcached.collection.BTreeDelete; +import net.spy.memcached.collection.BTreeElement; +import net.spy.memcached.collection.BTreeFindPosition; +import net.spy.memcached.collection.BTreeGet; +import net.spy.memcached.collection.BTreeGetBulk; +import net.spy.memcached.collection.BTreeGetBulkWithByteTypeBkey; +import net.spy.memcached.collection.BTreeGetBulkWithLongTypeBkey; +import net.spy.memcached.collection.BTreeGetByPosition; +import net.spy.memcached.collection.BTreeGetResult; +import net.spy.memcached.collection.BTreeMutate; +import net.spy.memcached.collection.BTreeOrder; +import net.spy.memcached.collection.BTreeSMGet; +import net.spy.memcached.collection.BTreeSMGetWithByteTypeBkey; +import net.spy.memcached.collection.BTreeSMGetWithLongTypeBkey; +import 
net.spy.memcached.collection.BTreeStore; +import net.spy.memcached.collection.BTreeStoreAndGet; +import net.spy.memcached.collection.BTreeUpdate; +import net.spy.memcached.collection.BTreeUpsert; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.ByteArrayTreeMap; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionBulkStore; +import net.spy.memcached.collection.CollectionCount; +import net.spy.memcached.collection.CollectionCreate; +import net.spy.memcached.collection.CollectionDelete; +import net.spy.memcached.collection.CollectionExist; +import net.spy.memcached.collection.CollectionGet; +import net.spy.memcached.collection.CollectionMutate; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.CollectionPipedStore; +import net.spy.memcached.collection.CollectionPipedStore.BTreePipedStore; +import net.spy.memcached.collection.CollectionPipedStore.ByteArraysBTreePipedStore; +import net.spy.memcached.collection.CollectionPipedStore.ListPipedStore; +import net.spy.memcached.collection.CollectionPipedStore.SetPipedStore; +import net.spy.memcached.collection.CollectionPipedUpdate; +import net.spy.memcached.collection.CollectionPipedUpdate.BTreePipedUpdate; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.CollectionStore; +import net.spy.memcached.collection.CollectionUpdate; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagUpdate; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.ExtendedBTreeGet; +import net.spy.memcached.collection.ListCreate; +import net.spy.memcached.collection.ListDelete; +import net.spy.memcached.collection.ListGet; +import net.spy.memcached.collection.ListStore; +import net.spy.memcached.collection.SMGetElement; +import 
net.spy.memcached.collection.SetCreate; +import net.spy.memcached.collection.SetDelete; +import net.spy.memcached.collection.SetExist; +import net.spy.memcached.collection.SetGet; +import net.spy.memcached.collection.SetPipedExist; +import net.spy.memcached.collection.SetStore; +import net.spy.memcached.compat.log.Logger; +import net.spy.memcached.compat.log.LoggerFactory; +import net.spy.memcached.internal.BTreeStoreAndGetFuture; +import net.spy.memcached.internal.CheckedOperationTimeoutException; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.internal.CollectionGetBulkFuture; +import net.spy.memcached.internal.OperationFuture; +import net.spy.memcached.internal.SMGetFuture; +import net.spy.memcached.ops.BTreeFindPositionOperation; +import net.spy.memcached.ops.BTreeGetBulkOperation; +import net.spy.memcached.ops.BTreeGetByPositionOperation; +import net.spy.memcached.ops.BTreeSortMergeGetOperation; +import net.spy.memcached.ops.BTreeStoreAndGetOperation; +import net.spy.memcached.ops.CollectionBulkStoreOperation; +import net.spy.memcached.ops.CollectionGetOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.CollectionPipedExistOperation; +import net.spy.memcached.ops.CollectionPipedStoreOperation; +import net.spy.memcached.ops.CollectionPipedUpdateOperation; +import net.spy.memcached.ops.ExtendedBTreeGetOperation; +import net.spy.memcached.ops.GetAttrOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StoreType; +import net.spy.memcached.plugin.FrontCacheMemcachedClient; +import net.spy.memcached.transcoders.CollectionTranscoder; +import net.spy.memcached.transcoders.Transcoder; +import net.spy.memcached.util.BTreeUtil; + +/** + * Client to a Arcus. + * + *

+ * <h2>Basic usage</h2>
+ *
+ * final static String arcusAdminAddrs = "127.0.0.1:2181";
+ * final static String serviceCode = "cafe";
+ * 
+ * ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
+ * 
+ * ArcusClient c = ArcusClient.createArcusClient(arcusAdminAddrs, serviceCode, cfb);
+ * 
+ * // Store a value (async) for one hour
+ * c.set("someKey", 3600, someObject);
+ * // Retrieve a value.
+ * Future<Object> myFuture = c.asyncGet("someKey");
+ * 
+ * To use a pool of clients instead of a single client, do the following:
+ * 
+ * int poolSize = 4;
+ * ArcusClientPool pool = ArcusClient.createArcusClientPool(arcusAdminAddrs, serviceCode, cfb, poolSize);
+ * 
+ * // Store a value
+ * pool.set("someKey", 3600, someObject);
+ * // Retrieve a value
+ * Future<Object> myFuture = pool.asyncGet("someKey");
+ * 
+ * 
+ * + */ +public class ArcusClient extends FrontCacheMemcachedClient implements ArcusClientIF { + + static String VERSION; + static Logger arcusLogger = LoggerFactory.getLogger("net.spy.memcached"); + static final String ARCUS_CLOUD_ADDR = "127.0.0.1:2181"; + public boolean dead; + + final BulkService bulkService; + final Transcoder collectionTranscoder; + + final int smgetKeyChunkSize; + + static final int BOPGET_BULK_CHUNK_SIZE = 200; + static final int NON_PIPED_BULK_INSERT_CHUNK_SIZE = 500; + + static final int MAX_GETBULK_KEY_COUNT = 200; + static final int MAX_GETBULK_ELEMENT_COUNT = 50; + static final int MAX_SMGET_COUNT = 1000; // server configuration is 2000. + + private CacheManager cacheManager; + + public void setCacheManager(CacheManager cacheManager) { + this.cacheManager = cacheManager; + } + + /** + * + * @param hostPorts + * arcus admin addresses + * @param serviceCode + * service code + * @param cfb + * ConnectionFactoryBuilder + * @return a single ArcusClient + */ + public static ArcusClient createArcusClient(String hostPorts, String serviceCode, + ConnectionFactoryBuilder cfb) { + + return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, 1, 10000).getClient(); + + } + + /** + * + * @param serviceCode + * service code + * @param cfb + * ConnectionFactoryBuilder + * @return a single ArcusClient + */ + public static ArcusClient createArcusClient(String serviceCode, + ConnectionFactoryBuilder cfb) { + + return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, 1, 10000).getClient(); + + } + + /** + * + * @param hostPorts + * arcus admin addresses + * @param serviceCode + * service code + * @param poolSize + * Arcus clinet pool size + * @param cfb + * ConnectionFactoryBuilder + * @return multiple ArcusClient + * + */ + public static ArcusClientPool createArcusClientPool(String hostPorts, String serviceCode, + ConnectionFactoryBuilder cfb, int poolSize) { + + return ArcusClient.createArcusClient(hostPorts, serviceCode, cfb, 
poolSize, 0); + + } + + /** + * + * @param serviceCode + * service code + * @param poolSize + * Arcus clinet pool size + * @param cfb + * ConnectionFactoryBuilder + * @return multiple ArcusClient + * + */ + public static ArcusClientPool createArcusClientPool(String serviceCode, + ConnectionFactoryBuilder cfb, int poolSize) { + + return ArcusClient.createArcusClient(ARCUS_CLOUD_ADDR, serviceCode, cfb, poolSize, 0); + + } + + /** + * + * @param hostPorts + * arcus admin addresses + * @param serviceCode + * service code + * @param cfb + * ConnectionFactoryBuilder + * @param poolSize + * Arcus clinet pool size + * @param waitTimeFor Connect + * waiting time for connection establishment(milliseconds) + * + * @return multiple ArcusClient + */ + private static ArcusClientPool createArcusClient(String hostPorts, String serviceCode, + ConnectionFactoryBuilder cfb, int poolSize, int waitTimeForConnect) { + + if (hostPorts == null) { + throw new NullPointerException("Arcus admin address required"); + } + + if (serviceCode == null) { + throw new NullPointerException("Service code required"); + } + + if (hostPorts.isEmpty()) { + throw new IllegalArgumentException("Arcus admin address is empty."); + } + + if (serviceCode.isEmpty()) { + throw new IllegalArgumentException("Service code is empty."); + } + + if (VERSION == null) { + VERSION = getVersion(); + } + + final CountDownLatch latch = new CountDownLatch(1); + + net.spy.memcached.CacheManager exe = new net.spy.memcached.CacheManager( + hostPorts, serviceCode, cfb, latch, poolSize, + waitTimeForConnect); + + try { + latch.await(); + } catch (Exception e) { + arcusLogger.warn("you cannot see this message!"); + } + + ArcusClient[] client = exe.getAC(); + + return new ArcusClientPool(poolSize, client); + } + + /** + * Create an Arcus client for the given memcached server addresses. 
+ * + * @param cf connection factory to configure connections for this client + * @param addrs socket addresses for the memcached servers + * @return Arcus client + */ + protected static ArcusClient getInstance(ConnectionFactory cf, + List addrs) throws IOException { + return new ArcusClient(cf, addrs); + } + + /** + * Create an Arcus client for the given memcached server addresses. + * + * @param cf connection factory to configure connections for this client + * @param addrs socket addresses for the memcached servers + * @throws IOException if connections cannot be established + */ + public ArcusClient(ConnectionFactory cf, List addrs) + throws IOException { + super(cf, addrs); + bulkService = new BulkService(cf.getBulkServiceLoopLimit(), + cf.getBulkServiceThreadCount(), cf.getBulkServiceSingleOpTimeout()); + collectionTranscoder = new CollectionTranscoder(); + smgetKeyChunkSize = cf.getDefaultMaxSMGetKeyChunkSize(); + registerMbean(); + } + + /** + * Register mbean for Arcus client statistics. + */ + private void registerMbean() { + if ("false".equals(System.getProperty("arcus.mbean", "false") + .toLowerCase())) { + getLogger().info("Arcus client statistics MBean is NOT registered."); + return; + } + + try { + StatisticsHandler mbean = new StatisticsHandler(this); + ArcusMBeanServer.getInstance().registMBean( + mbean, + mbean.getClass().getPackage().getName() + ":type=" + + mbean.getClass().getSimpleName() + "-" + + mbean.hashCode()); + + getLogger().info("Arcus client statistics MBean is registered."); + } catch (Exception e) { + getLogger().warn("Failed to initialize statistics mbean.", e); + } + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#shutdown() + */ + @Override + public void shutdown() { + super.shutdown(); + // Connect to Arcus server directly, cache manager may be null. 
+ if (cacheManager != null) { + cacheManager.shutdown(); + } + dead = true; + if (bulkService != null) { + bulkService.shutdown(); + } + } + + Future asyncStore(StoreType storeType, String key, + int exp, CachedData co) { + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture rv=new OperationFuture(latch, + operationTimeout); + Operation op=opFact.store(storeType, key, co.getFlags(), + exp, co.getData(), new OperationCallback() { + public void receivedStatus(OperationStatus val) { + rv.set(val.isSuccess()); + } + public void complete() { + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetAttr(java.lang.String, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncSetAttr(String key, + Attributes attrs) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + Operation op = opFact.setAttr(key, attrs, new OperationCallback() { + public void receivedStatus(OperationStatus status) { + if (status instanceof CollectionOperationStatus) { + rv.set(status.isSuccess(), + (CollectionOperationStatus) status); + } else { + throw new RuntimeException("Unhandled state: " + status); + } + } + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetAttr(java.lang.String, java.lang.Integer, java.lang.Long, net.spy.memcached.collection.CollectionOverflowAction) + */ + @Override + @Deprecated + public CollectionFuture asyncSetAttr(String key, + Integer expireTime, Long maxCount, + CollectionOverflowAction overflowAction) { + CollectionAttributes attrs = new CollectionAttributes(expireTime, + maxCount, overflowAction); + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new 
CollectionFuture( + latch, operationTimeout); + Operation op = opFact.setAttr(key, attrs, new OperationCallback() { + public void receivedStatus(OperationStatus val) { + if (val instanceof CollectionOperationStatus) { + rv.set(val.isSuccess(), (CollectionOperationStatus) val); + } else { + getLogger().warn("Unhandled state: " + val); + } + } + + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncGetAttr(java.lang.String) + */ + @Override + public CollectionFuture asyncGetAttr(final String key) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + Operation op = opFact.getAttr(key, new GetAttrOperation.Callback() { + CollectionAttributes attrs = new CollectionAttributes(); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus stat; + + if (status instanceof CollectionOperationStatus) { + stat = (CollectionOperationStatus) status; + } else { + stat = new CollectionOperationStatus(status); + } + + rv.set(stat.isSuccess() ? attrs : null, stat); + } + public void complete() { + latch.countDown(); + } + public void gotAttribute(String k, String attr) { + assert key.equals(k) : "Wrong key returned"; + attrs.setAttribute(attr); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Generic get operation for list items. Public methods for list items call this method. 
+ * + * @param k list item's key + * @param collectionGet operation parameters (element key and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the fetched value + */ + private CollectionFuture> asyncLopGet(final String k, + final CollectionGet collectionGet, final Transcoder tc) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture> rv = new CollectionFuture>( + latch, operationTimeout); + + Operation op = opFact.collectionGet(k, collectionGet, + new CollectionGetOperation.Callback() { + List list = new ArrayList(); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(list, cstatus); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + break; + case NOT_FOUND_ELEMENT: + rv.set(list, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case OUT_OF_RANGE: + rv.set(list, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found in condition : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) is not readable : %s", + k, cstatus); + } + break; + default: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) unknown status : %s", + k, cstatus); + } + break; + } + } + public void complete() { + latch.countDown(); + } + public void gotData(String key, long subkey, int flags, + byte[] data) { + assert key.equals(k) : "Wrong key returned"; + list.add(tc.decode(new 
CachedData(flags, data, tc + .getMaxSize()))); + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, T, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncSopExist(String key, T value, + Transcoder tc) { + SetExist exist = new SetExist(); + exist.setValue(value); + return asyncCollectionExist(key, "", exist, tc); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSopExist(java.lang.String, java.lang.Object) + */ + @Override + public CollectionFuture asyncSopExist(String key, Object value) { + SetExist exist = new SetExist(); + exist.setValue(value); + return asyncCollectionExist(key, "", exist, collectionTranscoder); + } + + /** + * Generic get operation for set items. Public methods for set items call this method. + * + * @param k set item's key + * @param collectionGet operation parameters (element key and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the fetched value + */ + private CollectionFuture> asyncSopGet(final String k, + final CollectionGet collectionGet, final Transcoder tc) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture> rv = new CollectionFuture>(latch, + operationTimeout); + + Operation op = opFact.collectionGet(k, collectionGet, + new CollectionGetOperation.Callback() { + Set set = new HashSet(); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(set, cstatus); + return; + } + + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + 
break; + case NOT_FOUND_ELEMENT: + rv.set(set, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) is not readable : %s", + k, cstatus); + } + break; + default: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) unknown status : %s", + k, cstatus); + } + break; + } + } + + public void complete() { + latch.countDown(); + } + + public void gotData(String key, long subkey, int flags, + byte[] data) { + assert key.equals(k) : "Wrong key returned"; + set.add(tc.decode(new CachedData(flags, data, tc + .getMaxSize()))); + } + }); + + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /** + * Generic get operation for b+tree items. Public methods for b+tree items call this method. + * + * @param k b+tree item's key + * @param collectionGet operation parameters (element keys and so on) + * @param reverse false=forward or true=backward + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of fetched elements and their keys + */ + private CollectionFuture>> asyncBopGet( + final String k, final CollectionGet collectionGet, + final boolean reverse, final Transcoder tc) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture>> rv = new CollectionFuture>>( + latch, operationTimeout); + Operation op = opFact.collectionGet(k, collectionGet, + new CollectionGetOperation.Callback() { + TreeMap> map = new TreeMap>( + (reverse) ? 
Collections.reverseOrder() : null); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(map, cstatus); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + break; + case NOT_FOUND_ELEMENT: + rv.set(map, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) is not readable : %s", + k, cstatus); + } + break; + default: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) Unknown response : %s", + k, cstatus); + } + break; + } + } + public void complete() { + latch.countDown(); + } + public void gotData(String key, long subkey, int flags, + byte[] data) { + assert key.equals(k) : "Wrong key returned"; + map.put(subkey, + new Element(subkey, tc + .decode(new CachedData(flags, data, tc + .getMaxSize())), collectionGet + .getElementFlag())); + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /** + * Generic store operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param subkey element key (list index, b+tree bkey) + * @param collectionStore operation parameters (value, eflags, attributes, and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the success/failure of the operation + */ + private CollectionFuture asyncCollectionStore(String key, + String subkey, CollectionStore collectionStore, Transcoder tc) { + CachedData co = tc.encode(collectionStore.getValue()); + collectionStore.setFlags(co.getFlags()); + return asyncCollectionStore(key, subkey, collectionStore, co); + } + + /** + * Generic store operation for collection items. Public methods for collection items call this method. + * + * @param key collection item's key + * @param subkey element key (list index, b+tree bkey) + * @param collectionStore operation parameters (value, eflags, attributes, and so on) + * @param co transcoded value + * @return future holding the success/failure of the operation + */ + CollectionFuture asyncCollectionStore(final String key, + final String subkey, final CollectionStore collectionStore, + final CachedData co) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + Operation op = opFact.collectionStore(key, subkey, collectionStore, + co.getData(), new OperationCallback() { + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(cstatus.isSuccess(), cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger().debug( + "Insertion to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionStore.getClass() + .getName() + ", key=" + key + + ", subkey=" + subkey + ", value=" + + 
collectionStore.getValue() + ")"); + } + } + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Generic pipelined store operation for collection items. Public methods for collection items call this method. + * + * @param key collection item's key + * @param store operation parameters (values, attributes, and so on) + * @return future holding the success/failure codes of individual operations and their index + */ + CollectionFuture> asyncCollectionPipedStore( + final String key, final CollectionPipedStore store) { + + if (store.getItemCount() == 0) { + throw new IllegalArgumentException( + "item count for piped operation cannot be 0."); + } + + if (store.getItemCount() > CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + throw new IllegalArgumentException( + "max item count for piped operation cannot be over " + + CollectionPipedStore.MAX_PIPED_ITEM_COUNT + "."); + } + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture> rv = + new CollectionFuture>(latch, operationTimeout); + + Operation op = opFact.collectionPipedStore(key, store, + new CollectionPipedStoreOperation.Callback() { + Map result = + new TreeMap(); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(result, cstatus); + } + + public void complete() { + latch.countDown(); + } + + public void gotStatus(Integer index, OperationStatus status) { + if (status instanceof CollectionOperationStatus) { + result.put(index, (CollectionOperationStatus) status); + } else { + result.put(index, new CollectionOperationStatus(status)); + } + } + }); + + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Generic pipelined update operation for collection items. 
Public methods for collection items call this method. + * + * @param key collection item's key + * @param update operation parameters (values and so on) + * @return future holding the success/failure codes of individual operations and their index + */ + CollectionFuture> asyncCollectionPipedUpdate( + final String key, final CollectionPipedUpdate update) { + + if (update.getItemCount() == 0) { + throw new IllegalArgumentException( + "item count for piped operation cannot be 0."); + } + + if (update.getItemCount() > CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) { + throw new IllegalArgumentException( + "max item count for piped operation cannot be over " + + CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT + "."); + } + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture> rv = new CollectionFuture>( + latch, operationTimeout); + + Operation op = opFact.collectionPipedUpdate(key, update, + new CollectionPipedUpdateOperation.Callback() { + Map result = new TreeMap(); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(result, cstatus); + } + + public void complete() { + latch.countDown(); + } + + public void gotStatus(Integer index, OperationStatus status) { + if (status instanceof CollectionOperationStatus) { + result.put(index, + (CollectionOperationStatus) status); + } else { + result.put(index, new CollectionOperationStatus( + status)); + } + } + }); + + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Generic pipelined update operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param updateList list of operation parameters (values and so on) + * @return future holding the success/failure codes of individual operations and their index + */ + CollectionFuture> asyncCollectionPipedUpdate( + final String key, final List> updateList) { + + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + + final CountDownLatch latch = new CountDownLatch(updateList.size()); + + final List mergedOperationStatus = Collections + .synchronizedList(new ArrayList(1)); + + final Map mergedResult = new ConcurrentHashMap(); + + for (int i = 0; i < updateList.size(); i++) { + final CollectionPipedUpdate update = updateList.get(i); + final int idx = i; + + Operation op = opFact.collectionPipedUpdate(key, update, + new CollectionPipedUpdateOperation.Callback() { + // each result status + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn( + "[PipeInsert] Unhandled state: " + + status); + return; + } + mergedOperationStatus.add(cstatus); + } + + // complete + public void complete() { + latch.countDown(); + } + + // got status + public void gotStatus(Integer index, + OperationStatus status) { + if (status instanceof CollectionOperationStatus) { + mergedResult + .put(index + + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT), + (CollectionOperationStatus) status); + } else { + mergedResult + .put(index + + (idx * CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT), + new CollectionOperationStatus( + status)); + } + } + }); + addOp(key, op); + ops.add(op); + } + + return new CollectionFuture>( + latch, operationTimeout) { + + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { 
+ for (Operation op : ops) { + if (op.isCancelled()) + return true; + } + return false; + }; + + @Override + public Map get(long duration, + TimeUnit units) throws InterruptedException, + TimeoutException, ExecutionException { + + if (!latch.await(duration, units)) { + for (Operation op : ops) { + MemcachedConnection.opTimedOut(op); + } + throw new CheckedOperationTimeoutException( + "Timed out waiting for operation", ops); + } else { + // continuous timeout counter will be reset + for (Operation op : ops) { + MemcachedConnection.opSucceeded(op); + } + } + + for (Operation op : ops) { + if (op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + if (isCancelled()) { + throw new ExecutionException(new RuntimeException( + "Cancelled")); + } + + return mergedResult; + } + + @Override + public CollectionOperationStatus getOperationStatus() { + for (OperationStatus status : mergedOperationStatus) { + if (!status.isSuccess()) { + return new CollectionOperationStatus(status); + } + } + return new CollectionOperationStatus(true, "END", + CollectionResponse.END); + } + }; + } + + /** + * Generic delete operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param collectionDelete operation parameters (element index/key, value, and so on) + * @return future holding the success/failure of the operation + */ + private CollectionFuture asyncCollectionDelete( + final String key, final CollectionDelete collectionDelete) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + Operation op = opFact.collectionDelete(key, collectionDelete, + new OperationCallback() { + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(cstatus.isSuccess(), cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger().debug( + "Deletion to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionDelete.getClass() + .getName() + ", key=" + key + + ")"); + } + } + + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Generic existence operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param subkey element key (list index, b+tree bkey) + * @param collectionExist operation parameters (element value and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the success/failure of the operation + */ + private CollectionFuture asyncCollectionExist( + final String key, final String subkey, + final CollectionExist collectionExist, Transcoder tc) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + CachedData cd = tc.encode(collectionExist.getValue()); + collectionExist.setData(cd.getData()); + + Operation op = opFact.collectionExist(key, subkey, collectionExist, + new OperationCallback() { + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + boolean isExist = (CollectionResponse.EXIST == cstatus + .getResponse()) ? 
true : false; + rv.set(isExist, cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger().debug( + "Exist command to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionExist.getClass() + .getName() + ", key=" + key + + ", subkey=" + subkey + ")"); + } + } + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, T, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public Future> asyncSetBulk(List key, int exp, T o, Transcoder tc) { + return bulkService.setBulk(key, exp, o, tc, new ArcusClient[] { this }); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.List, int, java.lang.Object) + */ + @Override + public Future> asyncSetBulk(List key, int exp, Object o) { + return asyncSetBulk(key, exp, o, transcoder); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public Future> asyncSetBulk(Map o, int exp, Transcoder tc) { + return bulkService.setBulk(o, exp, tc, new ArcusClient[] { this }); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ArcusClient#asyncSetBulk(java.util.Map, int) + */ + @Override + public Future> asyncSetBulk(Map o, int exp) { + return asyncSetBulk(o, exp, transcoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#getMaxPipedItemCount() + */ + @Override + public int getMaxPipedItemCount() { + return CollectionPipedStore.MAX_PIPED_ITEM_COUNT; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopCreate(java.lang.String, net.spy.memcached.collection.ElementValueType, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncBopCreate(String key, + ElementValueType valueType, CollectionAttributes 
attributes) { + int flag = CollectionTranscoder.examineFlags(valueType); + boolean noreply = false; + CollectionCreate bTreeCreate = new BTreeCreate(flag, + attributes.getExpireTime(), attributes.getMaxCount(), + attributes.getOverflowAction(), attributes.getReadable(), noreply); + return asyncCollectionCreate(key, bTreeCreate); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncSopCreate(String key, + ElementValueType type, CollectionAttributes attributes) { + int flag = CollectionTranscoder.examineFlags(type); + boolean noreply = false; + CollectionCreate bTreeCreate = new SetCreate(flag, + attributes.getExpireTime(), attributes.getMaxCount(), attributes.getReadable(), noreply); + return asyncCollectionCreate(key, bTreeCreate); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopCreate(java.lang.String, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncLopCreate(String key, + ElementValueType type, CollectionAttributes attributes) { + int flag = CollectionTranscoder.examineFlags(type); + boolean noreply = false; + CollectionCreate bTreeCreate = new ListCreate(flag, + attributes.getExpireTime(), attributes.getMaxCount(), + attributes.getOverflowAction(), attributes.getReadable(), noreply); + return asyncCollectionCreate(key, bTreeCreate); + } + + /** + * Generic create operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param collectionCreate operation parameters (flags, expiration time, and so on) + * @return future holding the success/failure of the operation + */ + CollectionFuture asyncCollectionCreate(final String key, + final CollectionCreate collectionCreate) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + + Operation op = opFact.collectionCreate(key, collectionCreate, + new OperationCallback() { + @Override + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(cstatus.isSuccess(), cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger() + .debug("Insertion to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionCreate.getClass() + .getName() + + ", key=" + + key + + ", attribute=" + + collectionCreate.toString() + ")"); + } + } + + @Override + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean) + */ + @Override + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty) { + BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter); + return asyncBopGet(key, get, false, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean) + */ + @Override + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, int count, + boolean withDelete, 
boolean dropIfEmpty) { + BTreeGet get = new BTreeGet(from, to, offset, count, + withDelete, dropIfEmpty, eFlagFilter); + boolean reverse = from > to; + return asyncBopGet(key, get, reverse, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + BTreeGet get = new BTreeGet(bkey, withDelete, dropIfEmpty, eFlagFilter); + return asyncBopGet(key, get, false, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, long, long, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, int count, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + BTreeGet get = new BTreeGet(from, to, offset, count, withDelete, + dropIfEmpty, eFlagFilter); + boolean reverse = from > to; + return asyncBopGet(key, get, reverse, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean) + */ + @Override + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty) { + ListGet get = new ListGet(index, withDelete, dropIfEmpty); + return asyncLopGet(key, get, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean) + */ + @Override + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty) { + ListGet get = new ListGet(from, to, withDelete, dropIfEmpty); + return asyncLopGet(key, get, collectionTranscoder); + } + + /* + * (non-Javadoc) + * 
@see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + ListGet get = new ListGet(index, withDelete, dropIfEmpty); + return asyncLopGet(key, get, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopGet(java.lang.String, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + ListGet get = new ListGet(from, to, withDelete, dropIfEmpty); + return asyncLopGet(key, get, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean) + */ + @Override + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty) { + SetGet get = new SetGet(count, withDelete, dropIfEmpty); + return asyncSopGet(key, get, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopGet(java.lang.String, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + SetGet get = new SetGet(count, withDelete, dropIfEmpty); + return asyncSopGet(key, get, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, boolean) + */ + @Override + public CollectionFuture asyncBopDelete(String key, long bkey, + ElementFlagFilter eFlagFilter, boolean dropIfEmpty) { + BTreeDelete delete = new BTreeDelete(bkey, false, + dropIfEmpty, eFlagFilter); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see 
net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, long, long, int, boolean) + */ + @Override + public CollectionFuture asyncBopDelete(String key, long from, + long to, ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty) { + BTreeDelete delete = new BTreeDelete(from, to, count, + false, dropIfEmpty, eFlagFilter); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, boolean) + */ + @Override + public CollectionFuture asyncLopDelete(String key, int index, + boolean dropIfEmpty) { + ListDelete delete = new ListDelete(index, false, + dropIfEmpty); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopDelete(java.lang.String, int, int, boolean) + */ + @Override + public CollectionFuture asyncLopDelete(String key, int from, + int to, boolean dropIfEmpty) { + ListDelete delete = new ListDelete(from, to, false, + dropIfEmpty); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean) + */ + @Override + public CollectionFuture asyncSopDelete(String key, Object value, + boolean dropIfEmpty) { + SetDelete delete = new SetDelete(value, false, + dropIfEmpty); + delete.setData(collectionTranscoder.encode(value).getData()); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopDelete(java.lang.String, java.lang.Object, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncSopDelete(String key, T value, + boolean dropIfEmpty, Transcoder tc) { + SetDelete delete = new SetDelete(value, false, dropIfEmpty); + delete.setData(tc.encode(value).getData()); + return asyncCollectionDelete(key, delete); + } + + /** + * Generic count operation for collection items. 
Public methods for collection items call this method. + * + * @param k collection item's key + * @param collectionCount operation parameters (element key range, eflags, and so on) + * @return future holding the element count + */ + private CollectionFuture asyncCollectionCount(final String k, + final CollectionCount collectionCount) { + + final CountDownLatch latch = new CountDownLatch(1); + + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + + Operation op = opFact.collectionCount(k, collectionCount, + new OperationCallback() { + + @Override + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + + if (cstatus.isSuccess()) { + rv.set(new Integer(cstatus.getMessage()), + new CollectionOperationStatus( + new OperationStatus(true, "END"))); + return; + } + + rv.set(null, cstatus); + } + + @Override + public void complete() { + latch.countDown(); + } + }); + + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetItemCount(java.lang.String, long, long) + */ + @Override + public CollectionFuture asyncBopGetItemCount(String key, + long from, long to, ElementFlagFilter eFlagFilter) { + CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter); + return asyncCollectionCount(key, collectionCount); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], long, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { + BTreeStore bTreeStore = new BTreeStore(value, + eFlag, (attributesForCreate != null), null, 
attributesForCreate); + return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncLopInsert(String key, int index, + Object value, CollectionAttributes attributesForCreate) { + ListStore listStore = new ListStore(value, + (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionStore(key, String.valueOf(index), listStore, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncSopInsert(String key, Object value, + CollectionAttributes attributesForCreate) { + SetStore setStore = new SetStore(value, + (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionStore(key, "", setStore, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, long, byte[], java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, T value, CollectionAttributes attributesForCreate, Transcoder tc) { + BTreeStore bTreeStore = new BTreeStore(value, eFlag, + (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionStore(key, String.valueOf(bkey), bTreeStore, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopInsert(java.lang.String, int, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture 
asyncLopInsert(String key, int index, + T value, CollectionAttributes attributesForCreate, Transcoder tc) { + ListStore listStore = new ListStore(value, (attributesForCreate != null), + null, attributesForCreate); + return asyncCollectionStore(key, String.valueOf(index), listStore, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopInsert(java.lang.String, java.lang.Object, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncSopInsert(String key, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + SetStore setStore = new SetStore(value, (attributesForCreate != null), + null, attributesForCreate); + return asyncCollectionStore(key, "", setStore, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, + CollectionAttributes attributesForCreate) { + return asyncBopPipedInsertBulk(key, elements, attributesForCreate, + collectionTranscoder); + } + + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, CollectionAttributes attributesForCreate) { + return asyncLopPipedInsertBulk(key, index, valueList, attributesForCreate, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, CollectionAttributes 
attributesForCreate) { + return asyncSopPipedInsertBulk(key, valueList, attributesForCreate, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.Map, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, + CollectionAttributes attributesForCreate, Transcoder tc) { + if (elements.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + BTreePipedStore store = new BTreePipedStore(key, elements, + (attributesForCreate != null), attributesForCreate, tc); + return asyncCollectionPipedStore(key, store); + } else { + List> storeList = new ArrayList>(); + + PartitionedMap list = new PartitionedMap( + elements, CollectionPipedStore.MAX_PIPED_ITEM_COUNT); + + for (int i = 0; i < list.size(); i++) { + storeList + .add(new BTreePipedStore(key, list.get(i), + (attributesForCreate != null), + attributesForCreate, tc)); + } + return asyncCollectionPipedStore(key, storeList); + } + } + + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopPipedInsertBulk(java.lang.String, int, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, + CollectionAttributes attributesForCreate, Transcoder tc) { + if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + ListPipedStore store = new ListPipedStore(key, index, + valueList, (attributesForCreate != null), + attributesForCreate, tc); + return asyncCollectionPipedStore(key, store); + } else { + PartitionedList list = new PartitionedList(valueList, + CollectionPipedStore.MAX_PIPED_ITEM_COUNT); + + List> storeList = new ArrayList>( + list.size()); + + for (int i = 0; i < list.size(); i++) { + storeList + .add(new 
ListPipedStore(key, index, list.get(i), + (attributesForCreate != null), + attributesForCreate, tc)); + } + return asyncCollectionPipedStore(key, storeList); + } + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, + CollectionAttributes attributesForCreate, Transcoder tc) { + if (valueList.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + SetPipedStore store = new SetPipedStore(key, valueList, + (attributesForCreate != null), attributesForCreate, tc); + return asyncCollectionPipedStore(key, store); + } else { + PartitionedList list = new PartitionedList(valueList, + CollectionPipedStore.MAX_PIPED_ITEM_COUNT); + + List> storeList = new ArrayList>( + list.size()); + + for (int i = 0; i < list.size(); i++) { + storeList + .add(new SetPipedStore(key, list.get(i), + (attributesForCreate != null), + attributesForCreate, tc)); + } + + return asyncCollectionPipedStore(key, storeList); + } + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#flush(java.lang.String) + */ + @Override + public OperationFuture flush(final String prefix) { + return flush(prefix, -1); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#flush(java.lang.String, int) + */ + @Override + public OperationFuture flush(final String prefix, final int delay) { + final AtomicReference flushResult = new AtomicReference( + null); + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { + public Operation newOp(final MemcachedNode n, + final CountDownLatch latch) { + Operation op = opFact.flush(prefix, delay, false, + new OperationCallback() { + public void receivedStatus(OperationStatus s) { + 
flushResult.set(s.isSuccess()); + } + + public void complete() { + latch.countDown(); + } + }); + ops.add(op); + return op; + } + }); + + return new OperationFuture(blatch, flushResult, + operationTimeout) { + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { + boolean rv = false; + for (Operation op : ops) { + rv |= op.isCancelled(); + } + return rv; + } + + @Override + public boolean isDone() { + boolean rv = true; + for (Operation op : ops) { + rv &= op.getState() == OperationState.COMPLETE; + } + return rv || isCancelled(); + } + }; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, long, long, int, int) + */ + @Override + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, long from, long to, ElementFlagFilter eFlagFilter, int offset, int count) { + if (keyList == null || keyList.isEmpty()) { + throw new IllegalArgumentException("Key list is empty."); + } + if (count < 1) { + throw new IllegalArgumentException( + "Value of 'count' must be larger than 0."); + } + if (offset + count > MAX_SMGET_COUNT) { + throw new IllegalArgumentException( + "Cannot value of 'offset + count' larger than " + MAX_SMGET_COUNT); + } + + Map> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize); + List> smGetList = new ArrayList>( + arrangedKey.size()); + for (List v : arrangedKey.values()) { + if (arrangedKey.size() > 1) { + smGetList.add(new BTreeSMGetWithLongTypeBkey(v, from, to, eFlagFilter, 0, offset + count)); + }else { + smGetList.add(new BTreeSMGetWithLongTypeBkey(v, from, to, eFlagFilter, offset, count)); + } + } + return smget(smGetList, offset, count, (from > to), + collectionTranscoder); + } + + /** + * Turn the list of keys into groups of keys. All keys in a group belong to the same memcached server. 
+ * + * @param keyList list of keys + * @param groupSize max size of the key group (number of keys) + * @return map of group name (memcached node + sequence number) and keys in the group + */ + private Map> groupingKeys(List keyList, int groupSize) { + Map chunkCount = new HashMap(); + Map> result = new HashMap>(); + + MemcachedConnection conn = getMemcachedConnection(); + + for (String k : keyList) { + validateKey(k); + String node = conn.findNodeByKey(k).getSocketAddress().toString(); + int cc; + if (chunkCount.containsKey(node)) { + cc = chunkCount.get(node); + } else { + cc = 0; + chunkCount.put(node, 0); + } + + String resultKey = node + cc; + + List arrangedKeyList = null; + + if (result.containsKey(resultKey)) { + if (result.get(resultKey).size() >= groupSize) { + arrangedKeyList = new ArrayList(); + cc++; + result.put(node + cc, arrangedKeyList); + chunkCount.put(node, cc); + } else { + arrangedKeyList = result.get(resultKey); + } + } else { + arrangedKeyList = new ArrayList(); + result.put(resultKey, arrangedKeyList); + } + arrangedKeyList.add(k); + } + return result; + } + + /** + * Get the sublist of elements from the smget result. + * + * @param mergedResult smget result (list of elements) + * @param offset start index, negative offset indicates "start from the tail" + * @param count number of elements to get + * @return list of elements + */ + private List> getSubList( + final List> mergedResult, int offset, int count) { + if (mergedResult.size() > count) { + int toIndex = (count + offset > mergedResult.size()) ? mergedResult + .size() : count + offset; + if (offset > toIndex) + return Collections.emptyList(); + return mergedResult.subList(offset, toIndex); + } else { + if (offset > 0) { + int toIndex = (count + offset > mergedResult.size()) ? 
mergedResult + .size() : count + offset; + + if (offset > toIndex) + return Collections.emptyList(); + return mergedResult.subList(offset, toIndex); + } else { + return mergedResult; + } + } + } + + /** + * Generic smget operation for b+tree items. Public smget methods call this method. + * + * @param smGetList smget parameters (keys, eflags, and so on) + * @param offset start index of the elements + * @param count number of elements to fetch + * @param reverse forward or backward + * @param tc transcoder to serialize and unserialize element value + * @return future holding the smget result (elements, return codes, and so on) + */ + private SMGetFuture>> smget( + final List> smGetList, final int offset, + final int count, final boolean reverse, final Transcoder tc) { + + final String END = "END"; + final String TRIMMED = "TRIMMED"; + final String DUPLICATED = "DUPLICATED"; + final String DUPLICATED_TRIMMED = "DUPLICATED_TRIMMED"; + + final CountDownLatch blatch = new CountDownLatch(smGetList.size()); + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + final List missedKey = Collections.synchronizedList(new ArrayList()); + final int totalResultElementCount = count + offset; + + final List> mergedResult = Collections.synchronizedList(new ArrayList>(totalResultElementCount)); + + final ReentrantLock lock = new ReentrantLock(); + + final List resultOperationStatus = Collections.synchronizedList(new ArrayList(1)); + + final List failedOperationStatus = Collections.synchronizedList(new ArrayList(1)); + + final Set totalBkey = new TreeSet(); + + final AtomicBoolean stopCollect = new AtomicBoolean(false); + + for (BTreeSMGet smGet : smGetList) { + Operation op = opFact.bopsmget(smGet, new BTreeSortMergeGetOperation.Callback() { + final List> eachResult = new ArrayList>(); + + private void addTotalBkey(List> smgetresult) { + for (SMGetElement each : smgetresult) { + if (each.getBkeyByObject() instanceof byte[]) { + totalBkey.add(new ByteArrayBKey((byte[]) 
each.getBkeyByObject())); + } else { + totalBkey.add(each.getBkeyByObject()); + } + } + } + + private boolean addTotalBkey(Object bkey) { + if (bkey instanceof byte[]) { + return totalBkey.add(new ByteArrayBKey((byte[])bkey)); + } else { + return totalBkey.add(bkey); + } + } + + @Override + public void receivedStatus(OperationStatus status) { + if (status.isSuccess()) { + resultOperationStatus.add(status); + } else { + stopCollect.set(true); + mergedResult.clear(); + failedOperationStatus.add(status); + } + if (status.isSuccess()) { + lock.lock(); + try { + // merged result is empty, add all. + if (smGetList.size() == 1) { + addTotalBkey(eachResult); + mergedResult.addAll(eachResult); + } else { + // merged result is empty, add all. + if (mergedResult.size() == 0) { + addTotalBkey(eachResult); + mergedResult.addAll(eachResult); + } else { + // remove trimmed area + if (TRIMMED.equals(status.getMessage())) { + + } + + // do sort merge + for (SMGetElement result : eachResult) { + boolean added = false; + + for (int i = 0; i < mergedResult.size(); i++) { + if (i > totalResultElementCount) { + added = true; + break; + } + + if ((reverse) ? (0 > result.compareTo(mergedResult.get(i))) : 0 < result + .compareTo(mergedResult.get(i))) { + if (!addTotalBkey(result.getBkeyByObject())) { + resultOperationStatus.add(new OperationStatus(true, "DUPLICATED")); + } + mergedResult.add(i, result); + added = true; + break; + } + } + + if (!added) { + if (!addTotalBkey(result.getBkeyByObject())) { + resultOperationStatus.add(new OperationStatus(true, "DUPLICATED")); + } + mergedResult.add(result); + } + } + } + } + } finally { + lock.unlock(); + } + } else { + getLogger().warn("SMGetFailed. 
status=%s", status); + } + } + + @Override + public void complete() { + blatch.countDown(); + } + + @Override + public void gotData(String key, Object subkey, int flags, byte[] data) { + if (stopCollect.get()) + return; + + if (subkey instanceof Long) { + eachResult.add(new SMGetElement(key, (Long) subkey, tc.decode(new CachedData(flags, data, tc.getMaxSize())))); + } else if (subkey instanceof byte[]) { + eachResult.add(new SMGetElement(key, (byte[]) subkey, tc.decode(new CachedData(flags, data, tc.getMaxSize())))); + } + } + + @Override + public void gotMissedKey(byte[] data) { + missedKey.add(new String(data)); + } + }); + ops.add(op); + addOp(smGet.getRepresentKey(), op); + } + + return new SMGetFuture>>(ops, operationTimeout) { + @Override + public List> get(long duration, TimeUnit units) + throws InterruptedException, TimeoutException, + ExecutionException { + if (!blatch.await(duration, units)) { + for (Operation op : ops) { + MemcachedConnection.opTimedOut(op); + } + throw new CheckedOperationTimeoutException( + "Timed out waiting for operation", ops); + } else { + // continuous timeout counter will be reset + for (Operation op : ops) { + MemcachedConnection.opSucceeded(op); + } + } + + for (Operation op : ops) { + if (op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + if (isCancelled()) { + throw new ExecutionException(new RuntimeException( + "Cancelled")); + } + + if (smGetList.size() == 1) + return mergedResult; + + return getSubList(mergedResult, offset, count); + } + + @Override + public List getMissedKeyList() { + return missedKey; + } + + @Override + public CollectionOperationStatus getOperationStatus() { + if (failedOperationStatus.size() > 0) { + return new CollectionOperationStatus( + failedOperationStatus.get(0)); + } + + OperationStatus end = null; + OperationStatus duplicated = null; + OperationStatus trimmed = null; + OperationStatus duplicatedTrimmed = null; + + for (OperationStatus status : 
resultOperationStatus) { + if (END.equals(status.getMessage())) + end = status; + else if (DUPLICATED.equals(status.getMessage())) + duplicated = status; + else if (TRIMMED.equals(status.getMessage())) + trimmed = status; + else if (DUPLICATED_TRIMMED.equals(status.getMessage())) + duplicatedTrimmed = status; + } + + if (end == null && duplicated == null && trimmed == null + && duplicatedTrimmed == null) { + getLogger().warn("[sort merge get] invalid result status."); + return null; + } + + if (duplicatedTrimmed != null + || (duplicated != null && trimmed != null)) + return new CollectionOperationStatus(duplicatedTrimmed); + else if (duplicated != null) + return new CollectionOperationStatus(duplicated); + else if (trimmed != null) + return new CollectionOperationStatus(trimmed); + else + return new CollectionOperationStatus(end); + } + }; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, long, java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, Object value, CollectionAttributes attributesForCreate) { + + BTreeUpsert bTreeStore = new BTreeUpsert(value, + elementFlag, (attributesForCreate != null), null, attributesForCreate); + + return asyncCollectionUpsert(key, String.valueOf(bkey), bTreeStore, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, long, java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc) { + + BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, + (attributesForCreate != null), null, attributesForCreate); + + return 
asyncCollectionUpsert(key, String.valueOf(bkey), bTreeStore, tc); + } + + private CollectionFuture asyncCollectionUpsert( + final String key, final String subkey, + final CollectionStore collectionStore, Transcoder tc) { + + CachedData co = tc.encode(collectionStore.getValue()); + collectionStore.setFlags(co.getFlags()); + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + Operation op = opFact.collectionUpsert(key, subkey, collectionStore, + co.getData(), new OperationCallback() { + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(cstatus.isSuccess(), cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger().debug( + "Insertion to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionStore.getClass() + .getName() + ", key=" + key + + ", subkey=" + subkey + ", value=" + + collectionStore.getValue() + ")"); + } + } + + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, long, java.lang.Object, net.spy.memcached.collection.ElementFlagUpdate) + */ + @Override + public CollectionFuture asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, Object value) { + BTreeUpdate collectionUpdate = new BTreeUpdate( + value, eFlagUpdate, false); + return asyncCollectionUpdate(key, String.valueOf(bkey), + collectionUpdate, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, long, java.lang.Object, net.spy.memcached.collection.ElementFlagUpdate, 
net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, T value, Transcoder tc) { + BTreeUpdate collectionUpdate = new BTreeUpdate(value, + eFlagUpdate, false); + return asyncCollectionUpdate(key, String.valueOf(bkey), + collectionUpdate, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object) + */ + @Override + public CollectionFuture asyncBopUpdate(String key, + byte[] bkey, ElementFlagUpdate eFlagUpdate, Object value) { + BTreeUpdate collectionUpdate = new BTreeUpdate( + value, eFlagUpdate, false); + return asyncCollectionUpdate(key, BTreeUtil.toHex(bkey), + collectionUpdate, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopUpdate(String key, + byte[] bkey, ElementFlagUpdate eFlagUpdate, T value, + Transcoder tc) { + BTreeUpdate collectionUpdate = new BTreeUpdate(value, + eFlagUpdate, false); + return asyncCollectionUpdate(key, BTreeUtil.toHex(bkey), + collectionUpdate, tc); + } + + /** + * Generic update operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param subkey element key (list index, b+tree bkey) + * @param collectionUpdate operation parameters (element value and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the success/failure of the operation + */ + private CollectionFuture asyncCollectionUpdate( + final String key, final String subkey, + final CollectionUpdate collectionUpdate, Transcoder tc) { + + CachedData co = null; + if (collectionUpdate.getNewValue() != null) { + co = tc.encode(collectionUpdate.getNewValue()); + collectionUpdate.setFlags(co.getFlags()); + } + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture( + latch, operationTimeout); + + Operation op = opFact.collectionUpdate(key, subkey, collectionUpdate, + ((co == null) ? null : co.getData()), new OperationCallback() { + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(cstatus.isSuccess(), cstatus); + if (!cstatus.isSuccess() + && getLogger().isDebugEnabled()) { + getLogger().debug( + "Insertion to the collection failed : " + + cstatus.getMessage() + + " (type=" + + collectionUpdate.getClass() + .getName() + ", key=" + key + + ", subkey=" + subkey + ", value=" + + collectionUpdate.getNewValue() + + ")"); + } + } + + public void complete() { + latch.countDown(); + } + }); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* + * (non-Javadoc) + * + * @see net.spy.memcached.ArcusClientIF#asyncBopUpdate(java.lang.String, + * byte[], net.spy.memcached.collection.ElementFlagUpdate, java.lang.Object, + * net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements) { + return 
asyncBopPipedUpdateBulk(key, elements, collectionTranscoder); + } + + @Override + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements, Transcoder tc) { + + if (elements.size() <= CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT) { + CollectionPipedUpdate collectionPipedUpdate = new BTreePipedUpdate( + key, elements, tc); + return asyncCollectionPipedUpdate(key, collectionPipedUpdate); + } else { + PartitionedList> list = new PartitionedList>( + elements, CollectionPipedUpdate.MAX_PIPED_ITEM_COUNT); + + List> collectionPipedUpdateList = new ArrayList>( + list.size()); + + for (int i = 0; i < list.size(); i++) { + collectionPipedUpdateList.add(new BTreePipedUpdate(key, list + .get(i), tc)); + } + + return asyncCollectionPipedUpdate(key, collectionPipedUpdateList); + } + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncBopInsert(String key, byte[] bkey, + byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { + BTreeStore bTreeStore = new BTreeStore(value, + eFlag, (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionStore(key, + BTreeUtil.toHex(bkey), bTreeStore, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsert(java.lang.String, byte[], java.lang.Object, byte[], boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopInsert(String key, + byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + BTreeStore bTreeStore = new BTreeStore(value, eFlag, + (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionStore(key, + BTreeUtil.toHex(bkey), bTreeStore, tc); + } + + /* + * (non-Javadoc) + * 
@see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], int, int, boolean, boolean, net.spy.memcached.collection.ElementFlagFilter) + */ + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, + boolean withDelete, boolean dropIfEmpty) { + ExtendedBTreeGet get = new ExtendedBTreeGet(bkey, bkey, + 0, 1, withDelete, dropIfEmpty, eFlagFilter); + return asyncBopExtendedGet(key, get, false, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagFilter, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + ExtendedBTreeGet get = new ExtendedBTreeGet(bkey, bkey, 0, 1, + withDelete, dropIfEmpty, eFlagFilter); + return asyncBopExtendedGet(key, get, false, tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], int, int, boolean, boolean, net.spy.memcached.collection.ElementFlagFilter) + */ + @Override + public CollectionFuture>> asyncBopGet(String key, + byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty) { + ExtendedBTreeGet get = new ExtendedBTreeGet(from, to, + offset, count, withDelete, dropIfEmpty, eFlagFilter); + + boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0; + + return asyncBopExtendedGet(key, get, reverse, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGet(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int, boolean, boolean, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture>> asyncBopGet( + String key, 
byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty, + Transcoder tc) { + ExtendedBTreeGet get = new ExtendedBTreeGet(from, to, offset, + count, withDelete, dropIfEmpty, eFlagFilter); + boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0; + return asyncBopExtendedGet(key, get, reverse, tc); + } + + /** + * Generic get operation for b+tree items using byte-array type bkeys. Public methods for b+tree items call this method. + * + * @param k b+tree item's key + * @param collectionGet operation parameters (element key and so on) + * @param reverse forward or backward + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of the fetched element and its byte-array bkey + */ + private CollectionFuture>> asyncBopExtendedGet( + final String k, final CollectionGet collectionGet, + final boolean reverse, final Transcoder tc) { + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture>> rv = new CollectionFuture>>( + latch, operationTimeout); + + Operation op = opFact.collectionGet2(k, collectionGet, + new ExtendedBTreeGetOperation.Callback() { + TreeMap> map = new ByteArrayTreeMap>( + (reverse) ? 
Collections.reverseOrder() : null); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(map, cstatus); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + break; + case NOT_FOUND_ELEMENT: + rv.set(map, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) is not readable : %s", + k, cstatus); + } + break; + } + } + + public void complete() { + latch.countDown(); + } + + public void gotData(String key, byte[] subkey, + byte[] elementFlag, int flags, byte[] data) { + assert key.equals(k) : "Wrong key returned"; + Element element = new Element(subkey, tc + .decode(new CachedData(flags, data, tc + .getMaxSize())), elementFlag); + map.put(new ByteArrayBKey(subkey), element); + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos) { + BTreeGetByPosition get = new BTreeGetByPosition(order, pos); + boolean reverse = false; + return asyncBopGetByPosition(key, get, reverse, collectionTranscoder); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos, Transcoder tc) { + BTreeGetByPosition get = new BTreeGetByPosition(order, pos); + boolean reverse = false; + return asyncBopGetByPosition(key, get, reverse, tc); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to) { 
+ BTreeGetByPosition get = new BTreeGetByPosition(order, from, to); + boolean reverse = from > to; + return asyncBopGetByPosition(key, get, reverse, collectionTranscoder); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to, Transcoder tc) { + BTreeGetByPosition get = new BTreeGetByPosition(order, from, to); + boolean reverse = from > to; + return asyncBopGetByPosition(key, get, reverse, tc); + } + + /** + * Generic get operation for b+tree items using positions. Public methods for b+tree items call this method. + * + * @param k b+tree item's key + * @param get operation parameters (element position and so on) + * @param reverse forward or backward + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of the fetched element and its position + */ + private CollectionFuture>> asyncBopGetByPosition( + final String k, final BTreeGetByPosition get, + final boolean reverse, final Transcoder tc) { + // Check for invalid arguments (not to get CLIENT_ERROR) + if (get.getOrder() == null) { + throw new IllegalArgumentException("BTreeOrder should not be null"); + } + if (get.getPosFrom() < 0 || get.getPosTo() < 0) { + throw new IllegalArgumentException("Positions should be 0 or positive integer"); + } + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture>> rv = new CollectionFuture>>( + latch, operationTimeout); + + Operation op = opFact.bopGetByPosition(k, get, new BTreeGetByPositionOperation.Callback() { + + TreeMap> map = new TreeMap>( + (reverse) ? 
Collections.reverseOrder() : null); + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(map, cstatus); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + break; + case NOT_FOUND_ELEMENT: + rv.set(map, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) is not readable : %s", + k, cstatus); + } + break; + case TYPE_MISMATCH: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) is not a B+Tree : %s", + k, cstatus); + } + break; + default: + getLogger().warn("Unhandled state: " + status); + } + } + + public void complete() { + latch.countDown(); + } + + public void gotData(String key, int flags, int pos, BKeyObject bkeyObject, byte[] eflag, byte[] data) { + assert key.equals(k) : "Wrong key returned"; + Element element = makeBTreeElement(key, flags, bkeyObject, eflag, data, tc); + + if (element != null) { + map.put(pos, element); + } + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + @Override + public CollectionFuture asyncBopFindPosition(String key, long longBKey, + BTreeOrder order) { + if (order == null) { + throw new IllegalArgumentException("BTreeOrder should not be null"); + } + BTreeFindPosition get = new BTreeFindPosition(longBKey, order); + return asyncBopFindPosition(key, get); + } + + @Override + public CollectionFuture asyncBopFindPosition(String key, byte[] byteArrayBKey, + BTreeOrder order) { + if (order == 
null) { + throw new IllegalArgumentException("BTreeOrder should not be null"); + } + BTreeFindPosition get = new BTreeFindPosition(byteArrayBKey, order); + return asyncBopFindPosition(key, get); + } + + /** + * Generic find-position operation for b+tree items. Public methods for b+tree items call this method. + * + * @param k b+tree item's key + * @param get operation parameters (element key and so on) + * @return future holding the element's position + */ + private CollectionFuture asyncBopFindPosition(final String k, final BTreeFindPosition get) { + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture rv = new CollectionFuture(latch, operationTimeout); + + Operation op = opFact.bopFindPosition(k, get, new BTreeFindPositionOperation.Callback() { + + int position = 0; + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(position, cstatus); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Key(%s) not found : %s", k, + cstatus); + } + break; + case NOT_FOUND_ELEMENT: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Element(%s) not found : %s", + k, cstatus); + } + break; + case UNREADABLE: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) is not readable : %s", + k, cstatus); + } + break; + case BKEY_MISMATCH: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) has wrong bkey : %s(%s)", + k, cstatus, get.getBkeyObject().getType()); + } + break; + case TYPE_MISMATCH: + rv.set(null, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Collection(%s) 
is not a B+Tree : %s", + k, cstatus); + } + break; + default: + getLogger().warn("Unhandled state: " + status); + } + } + + public void complete() { + latch.countDown(); + } + + public void gotData(int position) { + this.position = position; + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.INSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, collectionTranscoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.INSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, transcoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.INSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, collectionTranscoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.INSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, transcoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + BTreeStoreAndGet get = 
new BTreeStoreAndGet( + BTreeStoreAndGet.Command.UPSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, collectionTranscoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.UPSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, transcoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.UPSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, collectionTranscoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + BTreeStoreAndGet get = new BTreeStoreAndGet( + BTreeStoreAndGet.Command.UPSERT, bkey, + eFlag, value, attributesForCreate); + return asyncBTreeStoreAndGet(key, get, transcoder); + } + + /** + * Insert/upsert and get the trimmed element for b+tree items. Public methods call this method. 
+ * + * @param k b+tree item's key + * @param get operation parameters (element key and so on) + * @param tc transcoder to serialize and unserialize value + * @return future holding the success/failure of the operation and the trimmed element + */ + private BTreeStoreAndGetFuture asyncBTreeStoreAndGet( + final String k, final BTreeStoreAndGet get, + final Transcoder tc) { + CachedData co = tc.encode(get.getValue()); + get.setFlags(co.getFlags()); + + final CountDownLatch latch = new CountDownLatch(1); + final BTreeStoreAndGetFuture rv = new BTreeStoreAndGetFuture( + latch, operationTimeout); + + Operation op = opFact.bopStoreAndGet(k, get, co.getData(), + new BTreeStoreAndGetOperation.Callback() { + Element element = null; + + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + if (cstatus.isSuccess()) { + rv.set(true, cstatus); + rv.setElement(element); + return; + } + switch (cstatus.getResponse()) { + case NOT_FOUND: + case ELEMENT_EXISTS: + case OVERFLOWED: + case OUT_OF_RANGE: + case TYPE_MISMATCH: + case LENGTH_MISMATCH: + case BKEY_MISMATCH: + rv.set(false, cstatus); + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request for \"%s\" was not successful : %s", + k, cstatus); + } + break; + default: + getLogger().warn("Unhandled state: " + status); + } + } + + public void complete() { + latch.countDown(); + } + + public void gotData(String key, int flags, BKeyObject bkeyObject, + byte[] eflag, byte[] data) { + assert key.equals(k) : "Wrong key returned"; + element = makeBTreeElement(key, flags, bkeyObject, eflag, data, tc); + } + }); + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /** + * Utility method to create a b+tree element from individual parameters. 
+ * + * @param key b+tree item's key + * @param flags item flags, used when creating the item (see createKeyIfNotExists) + * @param bkey element key + * @param eflag element flags + * @param value element value + * @param tc transcoder to serialize and unserialize value + * @return element object containing all the parameters and transcoded value + */ + private Element makeBTreeElement(String key, int flags, + BKeyObject bkey, byte[] eflag, byte[] data, Transcoder tc) { + Element element = null; + T value = tc.decode(new CachedData(flags, data, tc.getMaxSize())); + + switch (bkey.getType()) { + case LONG: + element = new Element(bkey.getLongBKey(), value, eflag); + break; + case BYTEARRAY: + element = new Element(bkey.getByteArrayBKeyRaw(), value, eflag); + break; + default: + getLogger().error( + "Unexpected bkey type : (key:" + key + ", bkey:" + + bkey.toString() + ")"); + } + + return element; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, boolean) + */ + @Override + public CollectionFuture asyncBopDelete(String key, byte[] from, + byte[] to, ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty) { + BTreeDelete delete = new BTreeDelete(from, to, count, + false, dropIfEmpty, eFlagFilter); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopDelete(java.lang.String, byte[], net.spy.memcached.collection.ElementFlagFilter, boolean) + */ + @Override + public CollectionFuture asyncBopDelete(String key, + byte[] bkey, ElementFlagFilter eFlagFilter, boolean dropIfEmpty) { + BTreeDelete delete = new BTreeDelete(bkey, false, + dropIfEmpty, eFlagFilter); + return asyncCollectionDelete(key, delete); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, byte[], byte[], java.lang.Object, 
net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture asyncBopUpsert(String key, + byte[] bkey, byte[] elementFlag, Object value, + CollectionAttributes attributesForCreate) { + BTreeUpsert bTreeStore = new BTreeUpsert(value, + elementFlag, (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionUpsert(key, BTreeUtil.toHex(bkey), bTreeStore, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopUpsert(java.lang.String, byte[], byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture asyncBopUpsert(String key, + byte[] bkey, byte[] elementFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + BTreeUpsert bTreeStore = new BTreeUpsert(value, elementFlag, + (attributesForCreate != null), null, attributesForCreate); + return asyncCollectionUpsert(key, BTreeUtil.toHex(bkey), bTreeStore, + tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetItemCount(java.lang.String, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter) + */ + @Override + public CollectionFuture asyncBopGetItemCount(String key, + byte[] from, byte[] to, ElementFlagFilter eFlagFilter) { + CollectionCount collectionCount = new BTreeCount(from, to, eFlagFilter); + return asyncCollectionCount(key, collectionCount); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopPipedExistBulk(java.lang.String, java.util.List) + */ + @Override + public CollectionFuture> asyncSopPipedExistBulk(String key, + List values) { + SetPipedExist exist = new SetPipedExist(key, values, + collectionTranscoder); + return asyncSetPipedExist(key, exist); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopPipedExistBulk(java.lang.String, java.util.List, net.spy.memcached.transcoders.Transcoder) + */ + 
@Override + public CollectionFuture> asyncSopPipedExistBulk(String key, + List values, Transcoder tc) { + SetPipedExist exist = new SetPipedExist(key, values, tc); + return asyncSetPipedExist(key, exist); + } + + /** + * Generic pipelined existence operation for set items. Public methods call this method. + * + * @param key collection item's key + * @param exist operation parameters (element values) + * @return future holding the map of elements and their existence results + */ + CollectionFuture> asyncSetPipedExist( + final String key, final SetPipedExist exist) { + + if (exist.getItemCount() == 0) { + throw new IllegalArgumentException( + "item count for piped operation cannot be 0."); + } + + if (exist.getItemCount() > CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + throw new IllegalArgumentException( + "max item count for piped operation cannot be over " + + CollectionPipedStore.MAX_PIPED_ITEM_COUNT + "."); + } + + final CountDownLatch latch = new CountDownLatch(1); + final CollectionFuture> rv = new CollectionFuture>( + latch, operationTimeout); + + Operation op = opFact.collectionPipedExist(key, exist, + new CollectionPipedExistOperation.Callback() { + + Map result = new HashMap(); + + boolean hasAnError = false; + + public void receivedStatus(OperationStatus status) { + if (hasAnError) + return; + + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn("Unhandled state: " + status); + return; + } + rv.set(result, cstatus); + } + + public void complete() { + latch.countDown(); + } + + public void gotStatus(Integer index, OperationStatus status) { + CollectionOperationStatus cstatus; + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + cstatus = new CollectionOperationStatus(status); + } + + switch (cstatus.getResponse()) { + case EXIST: + case NOT_EXIST: + 
result.put(exist.getValues().get(index), + (CollectionResponse.EXIST.equals(cstatus + .getResponse()))); + break; + case UNREADABLE: + case TYPE_MISMATCH: + case NOT_FOUND: + hasAnError = true; + rv.set(new HashMap(0), + (CollectionOperationStatus) status); + break; + default: + getLogger().warn("Unhandled state: " + status); + } + } + }); + + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate) { + return asyncBopPipedInsertBulk(key, elements, attributesForCreate, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopPipedInsertBulk(java.lang.String, java.util.List, boolean, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate, Transcoder tc) { + if (elements.size() <= CollectionPipedStore.MAX_PIPED_ITEM_COUNT) { + CollectionPipedStore store = new ByteArraysBTreePipedStore( + key, elements, (attributesForCreate != null), + attributesForCreate, tc); + return asyncCollectionPipedStore(key, store); + } else { + PartitionedList> list = new PartitionedList>( + elements, CollectionPipedStore.MAX_PIPED_ITEM_COUNT); + + List> storeList = new ArrayList>( + list.size()); + + for (int i = 0; i < list.size(); i++) { + storeList.add(new ByteArraysBTreePipedStore(key, + list.get(i), (attributesForCreate != null), + attributesForCreate, tc)); + } + + return asyncCollectionPipedStore(key, storeList); + } + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopSortMergeGet(java.util.List, byte[], byte[], 
net.spy.memcached.collection.ElementFlagFilter, int, int) + */ + @Override + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, int count) { + if (keyList == null || keyList.isEmpty()) { + throw new IllegalArgumentException("Key list is empty."); + } + if (count < 1) { + throw new IllegalArgumentException( + "Value of 'count' must be larger than 0."); + } + if (offset + count > MAX_SMGET_COUNT) { + throw new IllegalArgumentException( + "Cannot value of 'offset + count' larger than " + MAX_SMGET_COUNT); + } + + Map> arrangedKey = groupingKeys(keyList, smgetKeyChunkSize); + List> smGetList = new ArrayList>( + arrangedKey.size()); + for (List v : arrangedKey.values()) { + if (arrangedKey.size() > 1) { + smGetList.add(new BTreeSMGetWithByteTypeBkey(v, from, to, eFlagFilter, 0, offset + count)); + }else { + smGetList.add(new BTreeSMGetWithByteTypeBkey(v, from, to, eFlagFilter, offset, count)); + } + } + + return smget(smGetList, offset, count, (BTreeUtil.compareByteArraysInLexOrder(from, to) > 0), + collectionTranscoder); + } + + /** + * Generic pipelined store operation for collection items. Public methods for collection items call this method. 
+ * + * @param key collection item's key + * @param storeList list of operation parameters (element values and so on) + * @return future holding the map of element index and the result of its store operation + */ + CollectionFuture> asyncCollectionPipedStore( + final String key, final List> storeList) { + + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + + final CountDownLatch latch = new CountDownLatch(storeList.size()); + + final List mergedOperationStatus = Collections + .synchronizedList(new ArrayList(1)); + + final Map mergedResult = new ConcurrentHashMap(); + + for (int i = 0; i < storeList.size(); i++) { + final CollectionPipedStore store = storeList.get(i); + final int idx = i; + + Operation op = opFact.collectionPipedStore(key, store, + new CollectionPipedStoreOperation.Callback() { + // each result status + public void receivedStatus(OperationStatus status) { + CollectionOperationStatus cstatus; + + if (status instanceof CollectionOperationStatus) { + cstatus = (CollectionOperationStatus) status; + } else { + getLogger().warn( + "[PipeInsert] Unhandled state: " + + status); + return; + } + mergedOperationStatus.add(cstatus); + } + + // complete + public void complete() { + latch.countDown(); + } + + // got status + public void gotStatus(Integer index, + OperationStatus status) { + if (status instanceof CollectionOperationStatus) { + mergedResult + .put(index + + (idx * CollectionPipedStore.MAX_PIPED_ITEM_COUNT), + (CollectionOperationStatus) status); + } else { + mergedResult + .put(index + + (idx * CollectionPipedStore.MAX_PIPED_ITEM_COUNT), + new CollectionOperationStatus( + status)); + } + } + }); + addOp(key, op); + ops.add(op); + } + + return new CollectionFuture>( + latch, operationTimeout) { + + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { + for 
(Operation op : ops) { + if (op.isCancelled()) + return true; + } + return false; + }; + + @Override + public Map get(long duration, + TimeUnit units) throws InterruptedException, + TimeoutException, ExecutionException { + + if (!latch.await(duration, units)) { + for (Operation op : ops) { + MemcachedConnection.opTimedOut(op); + } + throw new CheckedOperationTimeoutException( + "Timed out waiting for operation", ops); + } else { + // continuous timeout counter will be reset + for (Operation op : ops) { + MemcachedConnection.opSucceeded(op); + } + } + + for (Operation op : ops) { + if (op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + if (isCancelled()) { + throw new ExecutionException(new RuntimeException( + "Cancelled")); + } + + return mergedResult; + } + + @Override + public CollectionOperationStatus getOperationStatus() { + for (OperationStatus status : mergedOperationStatus) { + if (!status.isSuccess()) { + return new CollectionOperationStatus(status); + } + } + return new CollectionOperationStatus(true, "END", + CollectionResponse.END); + } + }; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsertBulk(java.util.List, long, byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + + return asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate, collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopInsertBulk(java.util.List, long, byte[], java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + + Map> arrangedKey = groupingKeys(keyList, 
NON_PIPED_BULK_INSERT_CHUNK_SIZE); + + List> storeList = new ArrayList>( + arrangedKey.size()); + + for (List eachKeyList : arrangedKey.values()) { + storeList.add(new CollectionBulkStore.BTreeBulkStore( + eachKeyList, bkey, eFlag, value, attributesForCreate, tc)); + } + + return asyncCollectionInsertBulk2(storeList); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + + return asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate, collectionTranscoder); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + + Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); + List> storeList = new ArrayList>( + arrangedKey.size()); + + for (List eachKeyList : arrangedKey.values()) { + storeList.add(new CollectionBulkStore.BTreeBulkStore( + eachKeyList, bkey, eFlag, value, attributesForCreate, tc)); + } + + return asyncCollectionInsertBulk2(storeList); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopInsertBulk(java.util.List, java.lang.Object, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public Future> asyncSopInsertBulk( + List keyList, Object value, + CollectionAttributes attributesForCreate) { + + return asyncSopInsertBulk(keyList, value, attributesForCreate, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncSopInsertBulk(java.util.List, java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public Future> asyncSopInsertBulk( + List keyList, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + + Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); + List> storeList = new ArrayList>( + 
arrangedKey.size()); + + for (List eachKeyList : arrangedKey.values()) { + storeList.add(new CollectionBulkStore.SetBulkStore( + eachKeyList, value, attributesForCreate, tc)); + } + + return asyncCollectionInsertBulk2(storeList); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopInsertBulk(java.util.List, int, java.lang.Object, net.spy.memcached.collection.CollectionAttributes) + */ + @Override + public Future> asyncLopInsertBulk( + List keyList, int index, Object value, + CollectionAttributes attributesForCreate) { + + return asyncLopInsertBulk(keyList, index, value, attributesForCreate, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncLopInsertBulk(java.util.List, int, java.lang.Object, net.spy.memcached.collection.CollectionAttributes, net.spy.memcached.transcoders.Transcoder) + */ + @Override + public Future> asyncLopInsertBulk( + List keyList, int index, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + + Map> arrangedKey = groupingKeys(keyList, NON_PIPED_BULK_INSERT_CHUNK_SIZE); + List> storeList = new ArrayList>( + arrangedKey.size()); + + for (List eachKeyList : arrangedKey.values()) { + storeList.add(new CollectionBulkStore.ListBulkStore( + eachKeyList, index, value, attributesForCreate, tc)); + } + + return asyncCollectionInsertBulk2(storeList); + } + + /** + * Generic bulk store operation for collection items. Public methods for collection items call this method. 
+ * + * @param storeList list of operation parameters (item keys, element values, and so on) + * @return future holding the map of item key and the result of the store operation on that key + */ + private Future> asyncCollectionInsertBulk2( + List> storeList) { + + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + + final Map failedResult = new ConcurrentHashMap(); + + final CountDownLatch latch = new CountDownLatch(storeList.size()); + + for (final CollectionBulkStore store : storeList) { + Operation op = opFact.collectionBulkStore(store.getKeyList(), + store, new CollectionBulkStoreOperation.Callback() { + public void receivedStatus(OperationStatus status) { + + } + + public void complete() { + latch.countDown(); + } + + public void gotStatus(Integer index, + OperationStatus status) { + if (!status.isSuccess()) { + if (status instanceof CollectionOperationStatus) { + failedResult.put( + store.getKeyList().get(index), + (CollectionOperationStatus) status); + } else { + failedResult.put( + store.getKeyList().get(index), + new CollectionOperationStatus( + status)); + } + } + } + }); + ops.add(op); + addOp(store.getKeyList().get(0), op); + } + + // return future + return new CollectionFuture>( + latch, operationTimeout) { + + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { + for (Operation op : ops) { + if (op.isCancelled()) + return true; + } + return false; + }; + + @Override + public Map get(long duration, + TimeUnit units) throws InterruptedException, + TimeoutException, ExecutionException { + if (!latch.await(duration, units)) { + for (Operation op : ops) { + MemcachedConnection.opTimedOut(op); + } + throw new CheckedOperationTimeoutException( + "Timed out waiting for bulk operation", ops); + } else { + // continuous timeout counter will be reset + for (Operation op 
: ops) { + MemcachedConnection.opSucceeded(op); + } + } + + for (Operation op : ops) { + if (op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + if (isCancelled()) { + throw new ExecutionException(new RuntimeException( + "Cancelled")); + } + + return failedResult; + } + + @Override + public CollectionOperationStatus getOperationStatus() { + return null; + } + }; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, long, long, net.spy.memcached.collection.ElementFlagFilter, int, int) + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count) { + return asyncBopGetBulk(keyList, from, to, eFlagFilter, offset, count, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, long, long, net.spy.memcached.collection.ElementFlagFilter, int, int, net.spy.memcached.transcoders.Transcoder) + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc) { + if (keyList == null) { + throw new IllegalArgumentException("key list is null."); + } + if (keyList.size() > MAX_GETBULK_KEY_COUNT) { + throw new IllegalArgumentException("size of key list must be less than " + MAX_GETBULK_KEY_COUNT + "."); + } + if (count > MAX_GETBULK_ELEMENT_COUNT) { + throw new IllegalArgumentException("count must be less than " + MAX_GETBULK_ELEMENT_COUNT + "."); + } + if (offset < 0) { + throw new IllegalArgumentException("offset can't be negative."); + } + + Map> rearrangedKeys = groupingKeys(keyList, BOPGET_BULK_CHUNK_SIZE); + + List> getBulkList = new ArrayList>( + rearrangedKeys.size()); + + for (Entry> entry : rearrangedKeys.entrySet()) { + getBulkList.add(new BTreeGetBulkWithLongTypeBkey(entry + .getValue(), from, to, eFlagFilter, offset, count)); + 
} + + return btreeGetBulk(getBulkList, offset, count, (to > from), tc); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int) + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count) { + return asyncBopGetBulk(keyList, from, to, eFlagFilter, offset, count, + collectionTranscoder); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ArcusClientIF#asyncBopGetBulk(java.util.List, byte[], byte[], net.spy.memcached.collection.ElementFlagFilter, int, int, net.spy.memcached.transcoders.Transcoder) + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc) { + if (keyList == null) { + throw new IllegalArgumentException("key list is null."); + } + if (keyList.size() > MAX_GETBULK_KEY_COUNT) { + throw new IllegalArgumentException("size of key list must be less than " + MAX_GETBULK_KEY_COUNT + "."); + } + if (count > MAX_GETBULK_ELEMENT_COUNT) { + throw new IllegalArgumentException("count must be less than " + MAX_GETBULK_ELEMENT_COUNT + "."); + } + if (offset < 0) { + throw new IllegalArgumentException("offset can't be negative."); + } + + Map> rearrangedKeys = groupingKeys(keyList, BOPGET_BULK_CHUNK_SIZE); + + List> getBulkList = new ArrayList>( + rearrangedKeys.size()); + + for (Entry> entry : rearrangedKeys.entrySet()) { + getBulkList.add(new BTreeGetBulkWithByteTypeBkey(entry + .getValue(), from, to, eFlagFilter, offset, count)); + } + + boolean reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0; + + return btreeGetBulkByteArrayBKey(getBulkList, offset, count, reverse, tc); + } + + /** + * Generic bulk get operation for b+tree items. Public methods call this method. 
+ * + * @param getBulkList list of operation parameters (item keys, element key range, and so on) + * @param offset start index of the elements + * @param count number of elements to fetch + * @param reverse forward or backward + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of item key and the fetched elements from that key + */ + private CollectionGetBulkFuture>> btreeGetBulk( + final List> getBulkList, final int offset, + final int count, final boolean reverse, final Transcoder tc) { + + final CountDownLatch latch = new CountDownLatch(getBulkList.size()); + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + final Map> result = new ConcurrentHashMap>(); + + for (BTreeGetBulk getBulk : getBulkList) { + Operation op = opFact.bopGetBulk(getBulk, new BTreeGetBulkOperation.Callback() { + @Override + public void receivedStatus(OperationStatus status) { } + + @Override + public void complete() { + latch.countDown(); + } + + @Override + public void gotKey(String key, int elementCount, OperationStatus status) { + result.put(key, new BTreeGetResult( + (elementCount > 0) ? new TreeMap>() : null, + new CollectionOperationStatus(status))); + } + + @Override + public void gotElement(String key, Object subkey, int flags, byte[] eflag, byte[] data) { + result.get(key).addElement( + new BTreeElement((Long)subkey, eflag, + tc.decode(new CachedData(flags, data, tc.getMaxSize())))); + } + }); + ops.add(op); + addOp(getBulk.getRepresentKey(), op); + } + + return new CollectionGetBulkFuture>>(latch, ops, result, operationTimeout); + } + + /** + * Generic bulk get operation for b+tree items using byte-array type bkeys. Public methods call this method. 
+ * + * @param getBulkList list of operation parameters (item keys, element key range, and so on) + * @param offset start index of the elements + * @param count number of elements to fetch + * @param reverse forward or backward + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of item key and the fetched elements from that key + */ + private CollectionGetBulkFuture>> btreeGetBulkByteArrayBKey( + final List> getBulkList, final int offset, + final int count, final boolean reverse, final Transcoder tc) { + + final CountDownLatch latch = new CountDownLatch(getBulkList.size()); + final ConcurrentLinkedQueue ops = new ConcurrentLinkedQueue(); + final Map> result = new ConcurrentHashMap>(); + + for (BTreeGetBulk getBulk : getBulkList) { + Operation op = opFact.bopGetBulk(getBulk, new BTreeGetBulkOperation.Callback() { + @Override + public void receivedStatus(OperationStatus status) { } + + @Override + public void complete() { + latch.countDown(); + } + + @Override + public void gotKey(String key, int elementCount, OperationStatus status) { + TreeMap> tree = null; + if (elementCount > 0) { + tree = new ByteArrayTreeMap>( + (reverse) ? 
Collections.reverseOrder() : null); + } + result.put(key, new BTreeGetResult(tree, new CollectionOperationStatus(status))); + } + + @Override + public void gotElement(String key, Object subkey, int flags, byte[] eflag, byte[] data) { + result.get(key).addElement( + new BTreeElement( + new ByteArrayBKey((byte[]) subkey), + eflag, tc.decode(new CachedData(flags, data, tc.getMaxSize())))); + } + }); + ops.add(op); + addOp(getBulk.getRepresentKey(), op); + } + + return new CollectionGetBulkFuture>>(latch, ops, result, operationTimeout); + } + + @Override + public CollectionFuture asyncBopIncr(String key, long subkey, + int by) { + CollectionMutate collectionMutate = new BTreeMutate(Mutator.incr, by); + return asyncCollectionMutate(key, String.valueOf(subkey), collectionMutate); + } + + @Override + public CollectionFuture asyncBopIncr(String key, byte[] subkey, + int by) { + CollectionMutate collectionMutate = new BTreeMutate(Mutator.incr, by); + return asyncCollectionMutate(key,BTreeUtil.toHex(subkey), collectionMutate); + } + + @Override + public CollectionFuture asyncBopDecr(String key, long subkey, + int by) { + CollectionMutate collectionMutate = new BTreeMutate(Mutator.decr, by); + return asyncCollectionMutate(key, String.valueOf(subkey), collectionMutate); + } + + @Override + public CollectionFuture asyncBopDecr(String key, byte[] subkey, + int by) { + CollectionMutate collectionMutate = new BTreeMutate(Mutator.decr, by); + return asyncCollectionMutate(key,BTreeUtil.toHex(subkey), collectionMutate); + } + + /** + * Generic increment/decrement operation for b+tree items. Public methods call this method. 
+ * + * @param k b+tree item's key + * @param subkey element key + * @param collectionMutate operation parameters (increment amount and so on) + * @return future holding the incremented or decremented value + */ + private CollectionFuture asyncCollectionMutate(final String k, final String subkey, + final CollectionMutate collectionMutate) { + + final CountDownLatch latch = new CountDownLatch(1); + + final CollectionFuture rv = new CollectionFuture(latch, + operationTimeout); + + Operation op = opFact.collectionMutate(k, subkey, collectionMutate, + new OperationCallback() { + + @Override + public void receivedStatus(OperationStatus status) { + if (status.isSuccess()) { + try { + rv.set(new Long(status.getMessage()), + new CollectionOperationStatus( + new OperationStatus(true, "END"))); + } catch (NumberFormatException e) { + rv.set(null, new CollectionOperationStatus( + new OperationStatus(false, + status.getMessage()))); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Key(%s), Bkey(%s) Unknown response : %s", + k, subkey, status); + } + } + return; + } + + rv.set(null, new CollectionOperationStatus(status)); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Key(%s), Bkey(%s) Unknown response : %s", + k, subkey, status); + } + } + + @Override + public void complete() { + latch.countDown(); + } + }); + + rv.setOperation(op); + addOp(k, op); + return rv; + } + + /** + * Get the client version. 
+ * @return version string + */ + private static String getVersion() { + Enumeration resEnum; + try { + resEnum = Thread.currentThread().getContextClassLoader().getResources(JarFile.MANIFEST_NAME); + while (resEnum.hasMoreElements()) { + try { + URL url = resEnum.nextElement(); + InputStream is = url.openStream(); + if (is != null) { + Manifest manifest = new Manifest(is); + java.util.jar.Attributes mainAttribs = manifest.getMainAttributes(); + String version = mainAttribs.getValue("Arcusclient-Version"); + if(version != null) { + return version; + } + } + } + catch (Exception e) { + + } + } + } catch (IOException e1) { + return "NONE"; + } + return "NONE"; + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ArcusClientException.java b/src/main/java/net/spy/memcached/ArcusClientException.java new file mode 100644 index 000000000..1ec1e3806 --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusClientException.java @@ -0,0 +1,39 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +@SuppressWarnings("serial") +public abstract class ArcusClientException extends RuntimeException { + + public ArcusClientException(String message) { + super(message); + } + + public ArcusClientException(String message, Throwable e) { + super(message, e); + } + + public static class InitializeClientException extends ArcusClientException { + public InitializeClientException(String message) { + super(message); + } + + public InitializeClientException(String message, Throwable e) { + super(message, e); + } + } +} diff --git a/src/main/java/net/spy/memcached/ArcusClientIF.java b/src/main/java/net/spy/memcached/ArcusClientIF.java new file mode 100644 index 000000000..e9144c009 --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusClientIF.java @@ -0,0 +1,1696 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BTreeGetResult; +import net.spy.memcached.collection.BTreeOrder; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagUpdate; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.BTreeStoreAndGetFuture; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.internal.CollectionGetBulkFuture; +import net.spy.memcached.internal.OperationFuture; +import net.spy.memcached.internal.SMGetFuture; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Interface for Arcus specific commands + */ +public interface ArcusClientIF { + + /** + * Sets attributes (metadata) associated with each key + * of collections including lists, sets, and B+ trees. + * + * @param key key of a collection (list, set, B+ tree) + * @param attrs a collectionAttribute object to set + * @return whether or not the operation was performed + */ + public abstract CollectionFuture asyncSetAttr(String key, + Attributes attrs); + + /** + * Sets attributes (metadata) associated with each key + * of collections including lists, sets, and B+ trees. 
+ * + * @param key key key of a collection (list, set, B+ tree) + * @param expireTime memcached key expire time + * @param maxCount maximum number of items in a collection + * @param overflowAction specify what to do when the number of elements + * already reached the maximum allowable element count in a list + * upon insertion (only valid for list), + * @return whether or not the operation was performed + */ + public abstract CollectionFuture asyncSetAttr(String key, + Integer expireTime, Long maxCount, + CollectionOverflowAction overflowAction); + + /** + * Gets attributes (metadata) associated with each key + * of collections including lists, sets, and B+ trees. + * + * @param key key of a collection (list, set, B+ tree) + * @return a CollectionAttributes object containing attributes + */ + public abstract CollectionFuture asyncGetAttr( + final String key); + + + /** + * Checks an item membership in a set. + * + * @param + * @param key key of a set + * @param value value of an item + * @param tc a transcoder to encode the value + * @return whether or not the item exists in the set + */ + public abstract CollectionFuture asyncSopExist(String key, + T value, Transcoder tc); + + /** + * Checks an item membership in a set using the default transcoder. + * + * @param key key of a set + * @param value value of an item + * @return whether or not the item exists in the set + */ + public abstract CollectionFuture asyncSopExist(String key, + Object value); + + /** + * Set an object in the cache on each key. + * + *

Basic usage

+ *
+	 *	ArcusClient c = getClientFromPool();
+	 *
+	 *	List<String> keys = new ArrayList<String>();
+	 *	keys.add("KEY1");
+	 *	keys.add("KEY2");
+	 *
+	 *	// The object to store
+	 *	Object value = "VALUE";
+	 *
+	 *	// Get customized transcoder
+	 *	Transcoder myTranscoder = getTranscoder();
+	 *
+	 *	// Store a value (async) on each key for one hour using multiple memcached clients.
+	 *	c.asyncSetBulk(keys, 3600, value, myTranscoder);
+	 *	
+ * + * @param + * @param key the key list which this object should be added + * @param exp the expiration of this object + * @param o the object to store on each keys + * @param tc the transcoder to serialize and unserialize the value + * @return a future that will hold the list of failed + * + */ + public abstract Future> asyncSetBulk( + List key, int exp, T o, Transcoder tc); + + /** + * Set an object in the cache on each key using specified memcached client + * + * @param key the key list which this object should be added + * @param exp the expiration of this object + * @param o the object to store on each keys + * @return a future that will hold the list of failed + * + */ + public abstract Future> asyncSetBulk( + List key, int exp, Object o); + + /** + * Set an object in the cache on each key using specified memcached client + * + * @param o the map that has keys and values to store + * @param exp the expiration of this object + * @param tc the transcoder to serialize and unserialize the value + * @return a future that will hold the list of failed + * + */ + public abstract Future> asyncSetBulk( + Map o, int exp, Transcoder tc); + + /** + * Set an object in the cache on each key using specified memcached client + * + * @param o the map that has keys and values to store + * @param exp the expiration of this object + * @return a future that will hold the list of failed + * + */ + public abstract Future> asyncSetBulk( + Map o, int exp); + + /** + * Insert one item into multiple b+trees at once. + * + * @param keyList + * key list of b+tree + * @param bkey + * key of a b+tree element. + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if + * this value is null, Arcus don't assign element flag. + * @param value + * value of element. this value can't be null. + * @param attributesForCreate + * create a b+tree with this attributes, if given key is not + * exists. 
+ * @param tc + * transcoder to encode value + * @returna future indicating success + */ + public abstract Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Insert one item into multiple b+trees at once. + * + * @param keyList + * key list of b+tree + * @param bkey + * key of a b+tree element. + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if + * this value is null, Arcus don't assign element flag. + * @param value + * value of element. this value can't be null. + * @param attributesForCreate + * create a b+tree with this attributes, if given key is not + * exists. + * @returna future indicating success + */ + public abstract Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate); + + /** + * Insert a value into each list + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * + * @param + * @param keyList a key list of list + * @param index list index (the item will be inserted before the element with the given index) + * @param value a value to insert into each list + * @param attributesForCreate if not true, a list should be created when key does not exist + * @param tc transcoder to encode value + * @return a future that will indicate the failure list of each operation + */ + public abstract Future> asyncLopInsertBulk( + List keyList, int index, T value, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Insert a value into each list + * The value will be encoded by the default transcoder (SerializeTranscoder) + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * + *

Basic usage

+ * + *
+	 *	ArcusClient client = getClientFromPool();
+	 *
+	 *	List<String> keyList = getKeyListShouldHaveValue();
+	 *	String value = "Some-value";
+	 *	int index = 0;
+	 *	CollectionAttributes attributesForCreate = new CollectionAttributes();
+	 *
+	 *	Future&lt;Map&lt;String, CollectionOperationStatus&gt;&gt; future = client.asyncLopInsertBulk(keyList,
+	 *			index, value, attributesForCreate);
+	 *
+	 *	Map<String, CollectionOperationStatus> failedList = null;
+	 *	try {
+	 *		failedList = future.get(1000L, TimeUnit.MILLISECONDS);
+	 *	} catch (TimeoutException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	} catch (InterruptedException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	} catch (ExecutionException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	}
+	 *	handleFailure(failedList);
+	 * 
+ * + * @param keyList a key list of the list + * @param index list index (the item will be inserted before the element with the given index) + * @param value a value to insert into each list + * @param attributesForCreate if not null, a list should be created when key does not exist + * @return a future that will indicate the failure list of each operation + */ + public abstract Future> asyncLopInsertBulk( + List keyList, int index, Object value, CollectionAttributes attributesForCreate); + + /** + * Insert a value into each set + * + * @param + * @param keyList a key list of the set + * @param value a value to insert into each set + * @param attributesForCreate if not null, a list should be created when key does not exist + * @param tc transcoder to encode value + * @return a future that will indicate the failure list of each operation + */ + public abstract Future> asyncSopInsertBulk( + List keyList, T value, CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Insert a value into each set + * + *

Basic usage

+ * + *
 
+	 *	ArcusClient client = getClientFromPool();
+	 *
+	 *	List<String> keyList = getKeyListShouldHaveValue();
+	 *	String value = "Some-value";
+	 *	CollectionAttributes attributesForCreate = new CollectionAttributes();
+	 *
+	 *	Future&lt;Map&lt;String, CollectionOperationStatus&gt;&gt; future = client.asyncSopInsertBulk(keyList,
+	 *			value, attributesForCreate);
+	 *
+	 *	Map<String, CollectionOperationStatus> failedList = null;
+	 *	try {
+	 *		failedList = future.get(1000L, TimeUnit.MILLISECONDS);
+	 *	} catch (TimeoutException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	} catch (InterruptedException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	} catch (ExecutionException e) {
+	 *		future.cancel(true);
+	 *		// Handle error here
+	 *	}
+	 *	handleFailure(failedList);
+	 * 
+ * + * @param keyList a key list of set + * @param value a value to insert into each set + * @param attributesForCreate if not null, a list should be created when key does not exist + * @return a future that will indicate the failure list of each operation + */ + public abstract Future> asyncSopInsertBulk( + List keyList, Object value, CollectionAttributes attributesForCreate); + + /** + * Get maximum possible piped bulk insert item count. + * + * @return Get maximum possible piped bulk insert item count. + */ + public abstract int getMaxPipedItemCount(); + + /** + * Create an empty b+ tree + * + * @param key + * key of a b+ tree + * @param valueType + * element data type of the b+ tree + * @param attributes + * attributes of the b+ tree + * @return a future indicating success, false if there was a key + */ + public CollectionFuture asyncBopCreate(String key, + ElementValueType valueType, CollectionAttributes attributes); + + /** + * Create an empty set + * + * @param key + * key of a set + * @param type + * element data type of the set + * @param attributes + * attributes of the set + * @return a future indicating success, false if there was a key + */ + public CollectionFuture asyncSopCreate(String key, + ElementValueType type, CollectionAttributes attributes); + + /** + * Create an empty list + * + * @param key + * key of a list + * @param type + * element data type of the list + * @param attributes + * attributes of the list + * @return a future indicating success, false if there was a key + */ + public CollectionFuture asyncLopCreate(String key, + ElementValueType type, CollectionAttributes attributes); + + /** + * Retrieves an item on given bkey in the b+tree. + * + * @param key + * key of a b+tree + * @param bkey + * bkey + * @param eFlagFilter + * bkey filter + * @param withDelete + * true to remove the returned item in the b+tree + * @param dropIfEmpty + * false to remove the key when all elements are removed. 
true b+ + * tree will remain empty even if all the elements are removed + * @return a future that will hold the return value map of the fetch. + */ + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves count number of items in given bkey range(from..to) + * from offset in the b+tree. + * The returned map from the future should be sorted by the given range. + *
+	 * 	from >= to : in descending order
+	 * 	from < to  : in ascending order
+	 * 
+ * + * @param key key of a b+tree + * @param from the first bkey + * @param to the last bkey + * @param eFlagFilter bkey filter + * @param offset 0-based offset + * @param count number of returning values (0 to all) + * @param withDelete true to remove the returned item in the b+tree + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, int count, + boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves an item on given bkey in the b+tree. + * + * @param + * @param key key of a b+tree + * @param bkey bkey + * @param eFlagFilter bkey filter + * @param withDelete true to remove the returned item in the b+tree + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a transcoder to decode returned values + * @return a future that will hold the return value map of the fetch. + */ + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty, Transcoder tc); + + /** + * Retrieves count number of items in given bkey range(from..to) + * from offset in the b+tree. + * The returned map from the future should be sorted by the given range. + *
+	 * 	from >= to : in descending order
+	 * 	from < to  : in ascending order
+	 * 
+ * + * @param + * @param key key of a b+tree + * @param from the first bkey + * @param to the last bkey + * @param eFlagFilter bkey filter + * @param offset 0-based offset + * @param count number of returning values (0 to all) + * @param withDelete true to remove the returned item in the b+tree + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a transcoder to decode returned values + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, int count, + boolean withDelete, boolean dropIfEmpty, Transcoder tc); + + /** + * Retrieves an item on given index in the list. + * + * @param key key of a list + * @param index list index + * @param withDelete true to remove the returned item in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return a future that will hold the return value list of the fetch + */ + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves items on given index range(from..to) in the list. + * + * @param key key of a list + * @param from the first index to delete + * @param to the last index to delete + * @param withDelete true to remove the returned items in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return a future that will hold the return value list of the fetch + */ + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves an item on given index in the list. 
+ * + * @param + * @param key key of a list + * @param index list index + * @param withDelete true to remove the returned item in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a tranacoder to decode returned value + * @return a future that will hold the return value list of the fetch + */ + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty, Transcoder tc); + + /** + * Retrieves items on given index range(from..to) in the list. (Arcus 1.6 and above) + * + * @param + * @param key key of a list + * @param from the first index to delete + * @param to the last index to delete + * @param withDelete true to remove the returned items in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a transcoder to decode the returned values + * @return a future that will hold the return value list of the fetch + */ + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty, Transcoder tc); + + /** + * Retrieves count number of random items in the set. + * + * @param key key of a list + * @param count number of items to fetch + * @param withDelete true to remove the returned item in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return a future that will hold the return value set of the fetch + */ + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves count number of random items in the set. 
+ * + * @param + * @param key key of a list + * @param count number of items to fetch + * @param withDelete true to remove the returned item in the list + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a tranacoder to decode returned value + * @return a future that will hold the return value set of the fetch + */ + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty, Transcoder tc); + + /** + * Deletes an item with given bkey in the b+tree. (Arcus 1.6 or above) + * + * @param key key of a b+tree + * @param bkey bkey of an item to delete + * @param eFlagFilter bkey filter + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncBopDelete(String key, long bkey, + ElementFlagFilter eFlagFilter, boolean dropIfEmpty); + + /** + * Deletes count number of items in given bkey range(from..to) in the b+tree (Arcus 1.6 or above) + * + * @param key key of a b+tree + * @param from the first bkey to delete + * @param to the last bkey to delete + * @param eFlagFilter bkey filter + * @param count number of returning values (0 to all) + * @param dropIfEmpty false to remove the key when all elements are removed. 
true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncBopDelete(String key, long from, + long to, ElementFlagFilter eFlagFilter, int count, boolean dropIfEmpty); + + /** + * Deletes count number of items in given bkey range(from..to) in the b+tree (Arcus 1.6 or above) + * + * @param key key of a b+tree + * @param from the first bkey to delete + * @param to the last bkey to delete + * @param eFlagFilter bkey filter + * @param count number of returning values (0 to all) + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncBopDelete(String key, + byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int count, + boolean dropIfEmpty); + + /** + * Deletes count number of items in given bkey range(from..to) in the b+tree (Arcus 1.6 or above) + * + * @param key key of a b+tree + * @param bkey bkey to delete + * @param eFlagFilter bkey filter + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncBopDelete(String key, + byte[] bkey, ElementFlagFilter eFlagFilter, boolean dropIfEmpty); + + /** + * Deletes an item on given index in the list. + * + * @param key key of a list + * @param index list index + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncLopDelete(String key, int index, + boolean dropIfEmpty); + + /** + * Deletes items on given index range(from..to) in the list. 
+ * + * @param key key of a list + * @param from the first index to delete + * @param to the last index to delete + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncLopDelete(String key, int from, + int to, boolean dropIfEmpty); + + /** + * Deletes an item in the set. + * + * @param key key of a set + * @param value value of an item + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return whether or not the operation was performed + */ + public CollectionFuture asyncSopDelete(String key, Object value, + boolean dropIfEmpty); + + /** + * Deletes an item in the set. + * + * @param + * @param key key of a set + * @param value value of an item + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc a transcoder to encode the value + * @return whether or not the operation was performed + */ + public CollectionFuture asyncSopDelete(String key, T value, + boolean dropIfEmpty, Transcoder tc); + + /** + * Get count of elements in given bkey range(from..to) and eFlagFilter. + * + * @param key + * key of a b+tree + * @param from + * the first bkey + * @param to + * the last bkey + * @param eFlagFilter + * bkey filter + * @return a future that will hold the count of exists element + */ + public CollectionFuture asyncBopGetItemCount(String key, + long from, long to, ElementFlagFilter eFlagFilter); + + /** + * Inserts an item into the b+tree. + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree node + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if this argument is null, Arcus don't assign element flag. 
+ * @param value + * a value to insert into the b+tree + * @param attributesForCreate + * attributes of the key + * @return a future indicating success, false if there was no key and + * attributesForCreate is null + */ + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, Object value, CollectionAttributes attributesForCreate); + + /** + * Insert a value into each list + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * @param index list index (the item will be inserted before the element with the given index) + * @param value a value to insert into each list + * @param attributesForCreate attributes of the key + * @param keyList a key list of list + * + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture asyncLopInsert(String key, int index, + Object value, CollectionAttributes attributesForCreate); + + /** + * Inserts an item into the set. + * + * @param key key of a set + * @param value a value to insert into the set + * @param attributesForCreate attributes of the key + * @return a future indicating success, false if there was no key + * and attributesForCreate parameter is null. + */ + public CollectionFuture asyncSopInsert(String key, Object value, + CollectionAttributes attributesForCreate); + + /** + * Inserts an item into the b+tree. + * + * @param + * @param key key of a b+tree + * @param bkey key of a b+tree node + * @param eFlag element flag. Length of element flag is between 1 and 31. if this argument is null, Arcus don't assign element flag. + * @param value a value to insert into the b+tree + * @param attributesForCreate attributes of the key + * @param tc a trancoder to encode the value + * @return a future indicating success, false if there was no key + * and attributesForCreate parameter is null. + */ + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Insert a value into each list + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * @param index list index (the item will be inserted before the element with the given index) + * @param value a value to insert into each list + * @param attributesForCreate attributes of the key + * @param tc a transcoder to encode the value + * @param keyList a key list of list + * + * @param + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture asyncLopInsert(String key, int index, + T value, CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Inserts an item into the set. + * + * @param + * @param key key of a set + * @param value a value to insert into the set + * @param tc a transcoder to encode the value + * @param attributesForCreate attributes of the key + * @return a future indicating success, false if there was no key + * and attributesForCreate parameter is null + */ + public CollectionFuture asyncSopInsert(String key, T value, + CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Insert values into a b+ tree + * + * @param key a key list of b+ tree + * @param elements + * @param attributesForCreate attributes of the key + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, CollectionAttributes attributesForCreate); + + /** + * Insert values into a list + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * + * @param key a key of the list + * @param index list index (the item will be inserted before the element with the given index) + * @param valueList valuses to insert into the set + * @param attributesForCreate attributes of the key + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, CollectionAttributes attributesForCreate); + + /** + * Insert values into a set + * + * @param key key of a set + * @param valueList valuses to insert into the set + * @param attributesForCreate attributes of the key + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, CollectionAttributes attributesForCreate); + + /** + * Insert values into a b+ tree + * + * @param + * @param key a key list of b+ tree + * @param elements + * @param attributesForCreate attributes of the key + * @param tc transcoder to encode value + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Insert values into a list + * + *
+	 *	Note on the index
+	 *     The item will be inserted before the element at the given index, except for these special values:
+	 *     -1: append, 0: prepend
+	 * 
+ * + * @param + * @param key a key of the list + * @param index list index (the item will be inserted before the element with the given index) + * @param valueList valuses to insert into the set + * @param attributesForCreate attributes of the key + * @param tc transcoder to encode value + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Insert values into a set + * + * @param + * @param key key of a set + * @param valueList valuses to insert into the set + * @param attributesForCreate attributes of the key + * @param tc transcoder to encode value + * @return a future that will indicate the failure list of each operation + */ + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Flush all items that starts with given prefix from all servers. + * + * @param prefix prefix of the keys + * @return whether or not the operation was accepted + */ + public OperationFuture flush(final String prefix); + + /** + * Flush all items that starts with given prefix from all servers with a delay of application. + * + * @param prefix prefix of the keys + * @param delay the period of time to delay, in seconds + * @return whether or not the operation was accepted + */ + public OperationFuture flush(final String prefix, final int delay); + + /** + * Get elements that matched both filter and bkey range criteria from + * multiple b+tree. The result is sorted by order of bkey. + * + * @param keyList + * b+ tree key list + * @param from + * bkey index from + * @param to + * bkey index to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-base offset + * @param count + * number of returning values (0 to all) + * @return a future that will hold the return value list of the fetch. 
+ */ + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, long from, long to, ElementFlagFilter eFlagFilter, int offset, int count); + + /** + * Update or insert an element. + * + * Element that matched both key and bkey criteria will updated. + * If element is not exists and attributesForCreate argument is not null. + * Create the tree that has an attribute of 'attributesForCreate' and insert the element that has elementFlag and value. + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param elementFlag + * flag of element + * @param value + * value of element + * @param attributesForCreate + * create a b+tree with this attributes, if given key of b+tree + * is not exists. + * @returna future indicating success, false if there was no key and + * attributesForCreate argument is null. + */ + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, Object value, CollectionAttributes attributesForCreate); + + /** + * Update or insert an element. + * + * Element that matched both key and bkey criteria will updated. + * If element is not exists and attributesForCreate argument is not null. + * Create the tree that has an attribute of 'attributesForCreate' and insert the element that has elementFlag and value. + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param elementFlag + * flag of element + * @param value + * value of element + * @param attributesForCreate + * create a b+tree with this attributes, if given key of b+tree + * is not exists. + * @param tc transcoder to encode value + * @returna future indicating success, false if there was no key and + * attributesForCreate argument is null. 
+ */ + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Update an element from the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlagUpdate + * new flag of element. + * do not update the eflag if this argument is null. + * @param value + * new value of element. + * do not update the value if this argument is null. + * @returna future indicating success + */ + public CollectionFuture asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, Object value); + + /** + * Update an element from the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlagUpdate + * new flag of element. + * do not update the eflag if this argument is null. + * @param value + * new value of element. + * do not update the value if this argument is null. + * @param tc + * a transcoder to encode the value of element + * @returna future indicating success + */ + public CollectionFuture asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, T value, Transcoder tc); + + /** + * Update elements from the b+tree + * + * @param key + * key of a b+tree + * @param elements + * list of b+tree elements + * @returna future indicating success + */ + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements); + + /** + * Update elements from the b+tree + * + * @param key + * key of a b+tree + * @param elements + * list of b+tree elements + * @param tc + * a transcoder to encode the value of element + * @returna future indicating success + */ + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements, Transcoder tc); + + /** + * Insert an item into the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlag + * element flag. Length of element flag is between 1 and 31. 
if + * this value is null, we don't assign element flag. + * @param value + * new value of element. this value can't be null. + * @param attributesForCreate + * create a b+tree with this attributes, if given key is not + * exists. + * @returna future indicating success, false if there was no key and attributesForCreate argument is null + */ + public CollectionFuture asyncBopInsert(String key, + byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Insert an item into the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if + * this value is null, we don't assign element flag. + * @param value + * new value of element. do not update the value if this argument + * is null. this value can't be null. + * @param attributesForCreate + * create a b+tree with this attributes, if given key is not + * exists. + * @param tc + * transcoder to encode value + * @returna future indicating success, false if there was no key and attributesForCreate argument is null + */ + public CollectionFuture asyncBopInsert(String key, + byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Retrieves count number of items in given bkey range(from..to) + * from offset in the b+tree. + * The returned map from the future should be sorted by the given range. + *
+	 * 	from >= to : in descending order
+	 * 	from < to  : in ascending order
+	 * 
+ * + * @param key key of a b+tree + * @param from the first bkey + * @param to the last bkey + * @param eFlagFilter bkey filter + * @param offset 0-based offset + * @param count number of returning values (0 to all) + * @param withDelete true to remove the returned item in the b+tree + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet( + String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves count number of items in given bkey range(from..to) + * from offset in the b+tree. + * The returned map from the future should be sorted by the given range. + *
+	 * 	from >= to : in descending order
+	 * 	from < to  : in ascending order
+	 * 
+ * + * @param key key of a b+tree + * @param from the first bkey + * @param to the last bkey + * @param eFlagFilter bkey filter + * @param offset 0-based offset + * @param count number of returning values (0 to all) + * @param withDelete true to remove the returned item in the b+tree + * @param dropIfEmpty false to remove the key when all elements are removed. true b+ tree will remain empty even if all the elements are removed + * @param tc transcoder to decode value + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet( + String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty, + Transcoder tc); + + /** + * Update or insert an element. + * + * Element that matched both key and bkey criteria will updated. If element + * is not exists and attributesForCreate argument is not null. Create the + * tree that has an attribute of 'attributesForCreate' and insert the + * element that has elementFlag and value. + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param elementFlag + * flag of element + * @param value + * value of element + * @param attributesForCreate + * create a b+tree with this attributes, if given key of b+tree + * is not exists. + * @returna future indicating success, false if there was no key and + * attributesForCreate argument is null. + */ + public CollectionFuture asyncBopUpsert(String key, + byte[] bkey, byte[] elementFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Update or insert an element. + * + * Element that matched both key and bkey criteria will updated. If element + * is not exists and attributesForCreate argument is not null. Create the + * tree that has an attribute of 'attributesForCreate' and insert the + * element that has elementFlag and value. 
+ * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param elementFlag + * flag of element + * @param value + * value of element + * @param attributesForCreate + * create a b+tree with these attributes if the given key of the b+tree + * does not exist. + * @param tc + * transcoder to encode value + * @return a future indicating success, false if there was no key and + * the attributesForCreate argument is null. + */ + public CollectionFuture asyncBopUpsert(String key, + byte[] bkey, byte[] elementFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Get count of elements in given bkey range(from..to) and eFlagFilter. + * + * @param key + * key of a b+tree + * @param from + * the first bkey + * @param to + * the last bkey + * @param eFlagFilter + * bkey filter + * @return a future that will hold the count of existing elements + */ + public CollectionFuture asyncBopGetItemCount(String key, + byte[] from, byte[] to, ElementFlagFilter eFlagFilter); + + /** + * Update an element from the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlagUpdate + * new flag of element. do not update the eflag if this argument + * is null. + * @param value + * new value of element. do not update the value if this argument + * is null. + * @return a future indicating success + */ + public CollectionFuture asyncBopUpdate(String key, + byte[] bkey, ElementFlagUpdate eFlagUpdate, Object value); + + /** + * Update an element from the b+tree + * + * @param key + * key of a b+tree + * @param bkey + * key of a b+tree element + * @param eFlagUpdate + * new flag of element. do not update the eflag if this argument + * is null. + * @param value + * new value of element. do not update the value if this argument + * is null. 
+ * @param tc + * transcoder to encode value + * @return a future indicating success + */ + public CollectionFuture asyncBopUpdate(String key, + byte[] bkey, ElementFlagUpdate eFlagUpdate, T value, Transcoder tc); + + /** + * Checks multiple items' membership in a set using the default transcoder. + * + * @param key + * key of set + * @param values + * value list to check membership + * @return a future indicating the map that represents the existence of each + * value + */ + public CollectionFuture> asyncSopPipedExistBulk( + String key, List values); + + /** + * Checks multiple items' membership in a set using the default transcoder. + * + * @param key + * key of set + * @param values + * value list to check membership + * @param tc + * transcoder to decode each value + * @return a future indicating the map that represents the existence of each + * value + */ + public CollectionFuture> asyncSopPipedExistBulk( + String key, List values, Transcoder tc); + + /** + * Insert elements into a b+tree + * + * @param key + * key of a b+tree + * @param elements + * list of elements to insert into the b+tree + * @param attributesForCreate + * create a b+tree with these attributes if the given key does not + * exist. + * @return a future that will hold the indexes of the failed elements in the + * iteration sequence and their result codes. + * + */ + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate); + + /** + * Insert elements into a b+tree + * + * @param key + * key of a b+tree + * @param elements + * list of elements to insert into the b+tree + * @param attributesForCreate + * create a b+tree with these attributes if the given key does not + * exist. + * @param tc transcoder to encode value + * @return a future that will hold the indexes of the failed elements in the + * iteration sequence and their result codes. 
+ * + */ + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate, Transcoder tc); + + /** + * Retrieves count number of items in given bkey in the b+tree. + * + * @param key + * key of a b+tree + * @param bkey + * bkey of an element + * @param eFlagFilter + * bkey filter + * @param withDelete + * true to remove the returned item in the b+tree + * @param dropIfEmpty + * false to remove the key when all elements are removed. true b+ + * tree will remain empty even if all the elements are removed + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty); + + /** + * Retrieves count number of items in given bkey in the b+tree. + * + * @param key + * key of a b+tree + * @param bkey + * bkey of an element + * @param eFlagFilter + * bkey filter + * @param withDelete + * true to remove the returned item in the b+tree + * @param dropIfEmpty + * false to remove the key when all elements are removed. true b+ + * tree will remain empty even if all the elements are removed + * @param tc + * transcoder to decode value + * @return a future that will hold the return value map of the fetch + */ + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, boolean withDelete, boolean dropIfEmpty, + Transcoder tc); + + /** + * Get elements that matched both filter and bkey range criteria from + * multiple b+tree. The result is sorted by order of bkey. + * + * @param keyList + * b+ tree key list + * @param from + * bkey index from + * @param to + * bkey index to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-base offset + * @param count + * number of returning values (0 to all) + * @return a future that will hold the return value list of the fetch. 
+ */ + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, + int offset, int count); + + /** + * Insert one item into multiple b+trees at once. + + * @param keyList + * key list of b+tree + * @param bkey + * key of a b+tree element. + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if + * this value is null, Arcus does not assign an element flag. + * @param value + * value of element. this value can't be null. + * @param attributesForCreate + * create a b+tree with these attributes if the given key does not + * exist. + * @return a future indicating success + */ + public abstract Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, Object value, CollectionAttributes attributesForCreate); + + /** + * Insert one item into multiple b+trees at once. + * + * @param keyList + * key list of b+tree + * @param bkey + * key of a b+tree element. + * @param eFlag + * element flag. Length of element flag is between 1 and 31. if + * this value is null, Arcus does not assign an element flag. + * @param value + * value of element. this value can't be null. + * @param attributesForCreate + * create a b+tree with these attributes if the given key does not + * exist. + * @param tc + * transcoder to encode value + * @return a future indicating success + */ + public abstract Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc); + + /** + * Get elements from each b+tree. 
+ * + * @param keyList + * key list of b+tree + * @param from + * bkey from + * @param to + * bkey to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-based offset (max = 50) + * @param count + * number of returning values (0 to all) (max = 200) + * @return future indicating result of each b+tree + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count); + + /** + * Get elements from each b+tree. + * + * @param keyList + * key list of b+tree + * @param from + * bkey from + * @param to + * bkey to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-based offset (max = 50) + * @param count + * number of returning values (0 to all) (max = 200) + * @param tc + * transcoder to decode value + * @return future indicating result of each b+tree + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc); + + /** + * Get elements from each b+tree. + * + * @param keyList + * key list of b+tree + * @param from + * bkey from + * @param to + * bkey to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-based offset (max = 50) + * @param count + * number of returning values (0 to all) (max = 200) + * @return future indicating result of each b+tree + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count); + + /** + * Get elements from each b+tree. 
+ * + * @param keyList + * key list of b+tree + * @param from + * bkey from + * @param to + * bkey to + * @param eFlagFilter + * bkey filter + * @param offset + * 0-based offset (max = 50) + * @param count + * number of returning values (0 to all) (max = 200) + * @param tc + * transcoder to decode value + * @return future indicating result of each b+tree + */ + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc); + + /** + * Increment the element's value in b+tree. + * + * @param key b+tree item's key + * @param subkey element's key + * @param by increment amount + * @return future holding the incremented value + */ + public CollectionFuture asyncBopIncr(String key, long subkey, int by); + + /** + * Increment the element's value in b+tree. + * + * @param key b+tree item's key + * @param subkey element's key (byte-array type key) + * @param by increment amount + * @return future holding the incremented value + */ + public CollectionFuture asyncBopIncr(String key, byte[] subkey, int by); + + /** + * Decrement the element's value in b+tree. + * + * @param key b+tree item's key + * @param subkey element's key + * @param by decrement amount + * @return future holding the decremented value + */ + public CollectionFuture asyncBopDecr(String key, long subkey, int by); + + /** + * Decrement the element's value in b+tree. + * + * @param key b+tree item's key + * @param subkey element's key (byte-array type key) + * @param by decrement amount + * @return future holding the decremented value + */ + public CollectionFuture asyncBopDecr(String key, byte[] subkey, int by); + + /** + * Get an element from b+tree using its position. 
+ * + * @param key b+tree item's key + * @param order ascending/descending order + * @param pos element's position + * @return future holding the map of the element and its position + */ + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos); + + /** + * Get an element from b+tree using its position. + * + * @param key b+tree item's key + * @param order ascending/descending order + * @param pos element's position + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of the element and its position + */ + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos, Transcoder tc); + + /** + * Get multiple elements from b+tree using positions. + * + * @param key b+tree item's key + * @param order ascending/descending order + * @param from start position + * @param to end position + * @return future holding the map of the elements and their positions + */ + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to); + + /** + * Get multiple elements from b+tree using positions. + * + * @param key b+tree item's key + * @param order ascending/descending order + * @param from start position + * @param to end position + * @param tc transcoder to serialize and unserialize value + * @return future holding the map of the elements and their positions + */ + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to, Transcoder tc); + + /** + * Get the position of the element in b+tree. + * + * @param key b+tree item's key + * @param longBKey element's key + * @param order ascending/descending order + * @return future holding the element's position + */ + public CollectionFuture asyncBopFindPosition( + String key, long longBKey, BTreeOrder order); + + + /** + * Get the position of the element in b+tree. 
+ * + * @param key b+tree item's key + * @param byteArrayBKey element's key (byte-array type key) + * @param order ascending/descending order + * @return future holding the element's position + */ + public CollectionFuture asyncBopFindPosition( + String key, byte[] byteArrayBKey, BTreeOrder order); + + /** + * Insert an element into b+tree and also get the "trimmed" element if any. + * + * @param key b+tree item's key + * @param bkey element's key + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Insert an element into b+tree and also get the "trimmed" element if any. + * + * @param key b+tree item's key + * @param bkey element's key + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @param transcoder transcoder to serialize and unserialize value + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder); + + /** + * Insert an element into b+tree and also get the "trimmed" element if any. 
+ * + * @param key b+tree item's key + * @param bkey element's key (byte-array type key) + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Insert an element into b+tree and also get the "trimmed" element if any. + * + * @param key b+tree item's key + * @param bkey element's key (byte-array type key) + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @param transcoder transcoder to serialize and unserialize value + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder); + + /** + * Upsert (update if element exists, insert otherwise) an element into b+tree and also get the "trimmed" element if any. + * + * @param key b+tree item's key + * @param bkey element's key + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Upsert (update if element exists, insert otherwise) an element into b+tree and also get the "trimmed" element if any. 
+ * + * @param key b+tree item's key + * @param bkey element's key + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @param transcoder transcoder to serialize and unserialize value + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder); + + /** + * Upsert (update if element exists, insert otherwise) an element into b+tree and also get the "trimmed" element if any. + * + * @param key b+tree item's key + * @param bkey element's key (byte-array type key) + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate); + + /** + * Upsert (update if element exists, insert otherwise) an element into b+tree and also get the "trimmed" element if any. 
+ * + * @param key b+tree item's key + * @param bkey element's key (byte-array type key) + * @param eFlag optional element flag + * @param value element's value + * @param attributesForCreate optional attributes used for creating b+tree item + * @param transcoder transcoder to serialize and unserialize value + * @return future holding the success/error of the operation and the trimmed element + */ + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder); + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ArcusClientPool.java b/src/main/java/net/spy/memcached/ArcusClientPool.java new file mode 100644 index 000000000..b6020af7d --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusClientPool.java @@ -0,0 +1,1013 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.net.SocketAddress; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BTreeGetResult; +import net.spy.memcached.collection.BTreeOrder; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.CollectionPipedStore; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagUpdate; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.BTreeStoreAndGetFuture; +import net.spy.memcached.internal.BulkFuture; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.internal.CollectionGetBulkFuture; +import net.spy.memcached.internal.OperationFuture; +import net.spy.memcached.internal.SMGetFuture; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Bags for ArcusClient + */ +public class ArcusClientPool implements ArcusClientIF { + + int poolSize; + ArcusClient[] client; + Random rand; + + public ArcusClientPool(int poolSize, ArcusClient[] client) { + + this.poolSize = poolSize; + this.client = client; + rand = new Random(); + } + + /** + * Returns single ArcusClient + * + * @return ArcusClient + */ + public ArcusClient getClient() { + return client[rand.nextInt(poolSize)]; + } + + /** + * Returns all ArcusClient in pool + * + * @return ArcusClient array + */ + public ArcusClient[] getAllClients() { + return client; + } + + public void shutdown() { + 
for (ArcusClient ac : client) { + ac.shutdown(); + } + } + + public Future append(long cas, String key, Object val) { + return this.getClient().append(cas, key, val); + } + + public Future append(long cas, String key, T val, + Transcoder tc) { + return this.getClient().append(cas, key, val, tc); + } + + public Future prepend(long cas, String key, Object val) { + return this.getClient().prepend(cas, key, val); + } + + public Future prepend(long cas, String key, T val, + Transcoder tc) { + return this.getClient().prepend(cas, key, val, tc); + } + + public Future asyncCAS(String key, long casId, T value, + Transcoder tc) { + return this.getClient().asyncCAS(key, casId, value, tc); + } + + public Future asyncCAS(String key, long casId, Object value) { + + return this.getClient().asyncCAS(key, casId, value); + } + + public CASResponse cas(String key, long casId, T value, Transcoder tc) + throws OperationTimeoutException { + return this.getClient().cas(key, casId, value, tc); + } + + public CASResponse cas(String key, long casId, Object value) + throws OperationTimeoutException { + return this.getClient().cas(key, casId, value); + } + + public Future add(String key, int exp, T o, Transcoder tc) { + return this.getClient().add(key, exp, o, tc); + } + + public Future add(String key, int exp, Object o) { + return this.getClient().add(key, exp, o); + } + + public Future set(String key, int exp, T o, Transcoder tc) { + return this.getClient().set(key, exp, o, tc); + } + + public Future set(String key, int exp, Object o) { + return this.getClient().set(key, exp, o); + } + + public Future replace(String key, int exp, T o, + Transcoder tc) { + return this.getClient().replace(key, exp, o, tc); + } + + public Future replace(String key, int exp, Object o) { + return this.getClient().replace(key, exp, o); + } + + public Future asyncGet(String key, Transcoder tc) { + return this.getClient().asyncGet(key, tc); + } + + public Future asyncGet(String key) { + return 
this.getClient().asyncGet(key); + } + + public Future> asyncGets(String key, Transcoder tc) { + return this.getClient().asyncGets(key, tc); + } + + public Future> asyncGets(String key) { + return this.getClient().asyncGets(key); + } + + public CASValue gets(String key, Transcoder tc) + throws OperationTimeoutException { + return this.getClient().gets(key, tc); + } + + public CASValue gets(String key) throws OperationTimeoutException { + return this.getClient().gets(key); + } + + public T get(String key, Transcoder tc) + throws OperationTimeoutException { + return this.getClient().get(key, tc); + } + + public Object get(String key) throws OperationTimeoutException { + return this.getClient().get(key); + } + + public BulkFuture> asyncGetBulk(Collection keys, + Iterator> tcs) { + return this.getClient().asyncGetBulk(keys, tcs); + } + + public BulkFuture> asyncGetBulk(Collection keys, + Transcoder tc) { + return this.getClient().asyncGetBulk(keys, tc); + } + + public BulkFuture> asyncGetBulk(Collection keys) { + return this.getClient().asyncGetBulk(keys); + } + + public BulkFuture> asyncGetBulk(Transcoder tc, + String... keys) { + return this.getClient().asyncGetBulk(tc, keys); + } + + public BulkFuture> asyncGetBulk(String... keys) { + return this.getClient().asyncGetBulk(keys); + } + + public Map getBulk(Collection keys, Transcoder tc) + throws OperationTimeoutException { + return this.getClient().getBulk(keys, tc); + } + + public Map getBulk(Collection keys) + throws OperationTimeoutException { + return this.getClient().getBulk(keys); + } + + public Map getBulk(Transcoder tc, String... keys) + throws OperationTimeoutException { + return this.getClient().getBulk(tc, keys); + } + + public Map getBulk(String... 
keys) + throws OperationTimeoutException { + return this.getClient().getBulk(keys); + } + + public Map getVersions() { + return this.getClient().getVersions(); + } + + public Map> getStats() { + return this.getClient().getStats(); + } + + public Map> getStats(String prefix) { + return this.getClient().getStats(prefix); + } + + public long incr(String key, int by) throws OperationTimeoutException { + return this.getClient().incr(key, by); + } + + public long decr(String key, int by) throws OperationTimeoutException { + return this.getClient().decr(key, by); + } + + public long incr(String key, int by, long def) + throws OperationTimeoutException { + return this.getClient().incr(key, by, def); + } + + public long incr(String key, int by, long def, int exp) + throws OperationTimeoutException { + return this.getClient().incr(key, by, def, exp); + } + + public long decr(String key, int by, long def) + throws OperationTimeoutException { + return this.getClient().decr(key, by, def); + } + + public long decr(String key, int by, long def, int exp) + throws OperationTimeoutException { + return this.getClient().decr(key, by, def, exp); + } + + public Future asyncIncr(String key, int by) { + return this.getClient().asyncIncr(key, by); + } + + public Future asyncIncr(String key, int by, long def, int exp) { + return this.getClient().asyncIncr(key, by, def, exp); + } + + public Future asyncDecr(String key, int by) { + return this.getClient().asyncDecr(key, by); + } + + public Future asyncDecr(String key, int by, long def, int exp) { + return this.getClient().asyncDecr(key, by, def, exp); + } + + public Future delete(String key) { + return this.getClient().delete(key); + } + + public Future flush(int delay) { + return this.getClient().flush(delay); + } + + public Future flush() { + return this.getClient().flush(); + } + + public boolean waitForQueues(long timeout, TimeUnit unit) { + return this.getClient().waitForQueues(timeout, unit); + } + + public boolean 
addObserver(ConnectionObserver obs) { + return this.getClient().addObserver(obs); + } + + public boolean removeObserver(ConnectionObserver obs) { + return this.getClient().removeObserver(obs); + } + + public Set listSaslMechanisms() { + return this.getClient().listSaslMechanisms(); + } + + @Override + public CollectionFuture asyncSetAttr(String key, Attributes attrs) { + return this.getClient().asyncSetAttr(key, attrs); + } + + @Override + public CollectionFuture asyncSetAttr(String key, + Integer expireTime, Long maxCount, + CollectionOverflowAction overflowAction) { + return this.getClient().asyncSetAttr(key, expireTime, maxCount, + overflowAction); + } + + @Override + public CollectionFuture asyncGetAttr(String key) { + return this.getClient().asyncGetAttr(key); + } + + @Override + public CollectionFuture asyncSopExist(String key, T value, + Transcoder tc) { + return this.getClient().asyncSopExist(key, value, tc); + } + + @Override + public CollectionFuture asyncSopExist(String key, Object value) { + return this.getClient().asyncSopExist(key, value); + } + + @Override + public Future> asyncSetBulk( + List key, int exp, T o, Transcoder tc) { + return this.getClient().asyncSetBulk(key, exp, o, tc); + } + + @Override + public Future> asyncSetBulk( + List key, int exp, Object o) { + return this.getClient().asyncSetBulk(key, exp, o); + } + + @Override + public Future> asyncSetBulk( + Map o, int exp, Transcoder tc) { + return this.getClient().asyncSetBulk(o, exp, tc); + } + + @Override + public Future> asyncSetBulk( + Map o, int exp) { + return this.getClient().asyncSetBulk(o, exp); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate, tc); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, long bkey, byte[] eFlag, Object value, + 
CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate); + } + + @Override + public Future> asyncLopInsertBulk( + List keyList, int index, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncLopInsertBulk(keyList, index, value, + attributesForCreate, tc); + } + + @Override + public Future> asyncLopInsertBulk( + List keyList, int index, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncLopInsertBulk(keyList, index, value, + attributesForCreate); + } + + @Override + public Future> asyncSopInsertBulk( + List keyList, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncSopInsertBulk(keyList, value, + attributesForCreate, tc); + } + + @Override + public Future> asyncSopInsertBulk( + List keyList, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncSopInsertBulk(keyList, value, + attributesForCreate); + } + + @Override + public int getMaxPipedItemCount() { + return CollectionPipedStore.MAX_PIPED_ITEM_COUNT; + } + + @Override + public CollectionFuture asyncBopCreate(String key, + ElementValueType valueType, CollectionAttributes attributes) { + return this.getClient().asyncBopCreate(key, valueType, attributes); + } + + @Override + public CollectionFuture asyncSopCreate(String key, + ElementValueType type, CollectionAttributes attributes) { + return this.getClient().asyncSopCreate(key, type, attributes); + } + + @Override + public CollectionFuture asyncLopCreate(String key, + ElementValueType type, CollectionAttributes attributes) { + return this.getClient().asyncLopCreate(key, type, attributes); + } + + @Override + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, + boolean dropIfEmpty) { + return this.getClient().asyncBopGet(key, bkey, eFlagFilter, 
withDelete, + dropIfEmpty); + } + + @Override + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty) { + return this.getClient().asyncBopGet(key, from, to, eFlagFilter, offset, + count, withDelete, dropIfEmpty); + } + + @Override + public CollectionFuture>> asyncBopGet(String key, + long bkey, ElementFlagFilter eFlagFilter, boolean withDelete, + boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncBopGet(key, bkey, eFlagFilter, withDelete, + dropIfEmpty, tc); + } + + @Override + public CollectionFuture>> asyncBopGet(String key, + long from, long to, ElementFlagFilter eFlagFilter, int offset, + int count, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncBopGet(key, from, to, eFlagFilter, offset, + count, withDelete, dropIfEmpty, tc); + } + + @Override + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty) { + return this.getClient() + .asyncLopGet(key, index, withDelete, dropIfEmpty); + } + + @Override + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty) { + return this.getClient().asyncLopGet(key, from, to, withDelete, + dropIfEmpty); + } + + @Override + public CollectionFuture> asyncLopGet(String key, int index, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncLopGet(key, index, withDelete, + dropIfEmpty, tc); + } + + @Override + public CollectionFuture> asyncLopGet(String key, int from, + int to, boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncLopGet(key, from, to, withDelete, + dropIfEmpty, tc); + } + + @Override + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty) { + return this.getClient() + .asyncSopGet(key, count, withDelete, dropIfEmpty); + } + + 
@Override + public CollectionFuture> asyncSopGet(String key, int count, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncSopGet(key, count, withDelete, + dropIfEmpty, tc); + } + + @Override + public CollectionFuture asyncBopDelete(String key, long bkey, + ElementFlagFilter eFlagFilter, boolean dropIfEmpty) { + return this.getClient().asyncBopDelete(key, bkey, eFlagFilter, + dropIfEmpty); + } + + @Override + public CollectionFuture asyncBopDelete(String key, long from, + long to, ElementFlagFilter eFlagFilter, int count, + boolean dropIfEmpty) { + return this.getClient().asyncBopDelete(key, from, to, eFlagFilter, + count, dropIfEmpty); + } + + @Override + public CollectionFuture asyncLopDelete(String key, int index, + boolean dropIfEmpty) { + return this.getClient().asyncLopDelete(key, index, dropIfEmpty); + } + + @Override + public CollectionFuture asyncLopDelete(String key, int from, + int to, boolean dropIfEmpty) { + return this.getClient().asyncLopDelete(key, from, to, dropIfEmpty); + } + + @Override + public CollectionFuture asyncSopDelete(String key, Object value, + boolean dropIfEmpty) { + return this.getClient().asyncSopDelete(key, value, dropIfEmpty); + } + + @Override + public CollectionFuture asyncSopDelete(String key, T value, + boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncSopDelete(key, value, dropIfEmpty, tc); + } + + @Override + public CollectionFuture asyncBopGetItemCount(String key, + long from, long to, ElementFlagFilter eFlagFilter) { + return this.getClient() + .asyncBopGetItemCount(key, from, to, eFlagFilter); + } + + @Override + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsert(key, bkey, eFlag, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncLopInsert(String key, int index, + Object value, CollectionAttributes 
attributesForCreate) { + return this.getClient().asyncLopInsert(key, index, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncSopInsert(String key, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncSopInsert(key, value, attributesForCreate); + } + + @Override + public CollectionFuture asyncBopInsert(String key, long bkey, + byte[] eFlag, T value, CollectionAttributes attributesForCreate, + Transcoder tc) { + return this.getClient().asyncBopInsert(key, bkey, eFlag, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncLopInsert(String key, int index, + T value, CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncLopInsert(key, index, value, + attributesForCreate, tc); + } + + @Override + public CollectionFuture asyncSopInsert(String key, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncSopInsert(key, value, attributesForCreate, + tc); + } + + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopPipedInsertBulk(key, elements, + attributesForCreate); + } + + @Override + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncLopPipedInsertBulk(key, index, valueList, + attributesForCreate); + } + + @Override + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncSopPipedInsertBulk(key, valueList, + attributesForCreate); + } + + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, Map elements, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopPipedInsertBulk(key, elements, + 
attributesForCreate, tc); + } + + @Override + public CollectionFuture> asyncLopPipedInsertBulk( + String key, int index, List valueList, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncLopPipedInsertBulk(key, index, valueList, + attributesForCreate, tc); + } + + @Override + public CollectionFuture> asyncSopPipedInsertBulk( + String key, List valueList, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncSopPipedInsertBulk(key, valueList, + attributesForCreate, tc); + } + + @Override + public OperationFuture flush(String prefix) { + return this.getClient().flush(prefix); + } + + @Override + public OperationFuture flush(String prefix, int delay) { + return this.getClient().flush(prefix, delay); + } + + @Override + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count) { + return this.getClient().asyncBopSortMergeGet(keyList, from, to, + eFlagFilter, offset, count); + } + + @Override + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopUpsert(key, bkey, elementFlag, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncBopUpsert(String key, long bkey, + byte[] elementFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopUpsert(key, bkey, elementFlag, value, + attributesForCreate, tc); + } + + @Override + public CollectionFuture asyncBopInsert(String key, byte[] bkey, + byte[] eFlag, Object value, CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsert(key, bkey, eFlag, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncBopInsert(String key, + byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + 
return this.getClient().asyncBopInsert(key, bkey, eFlag, value, + attributesForCreate, tc); + } + + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, + int offset, int count, boolean withDelete, boolean dropIfEmpty) { + return this.getClient().asyncBopGet(key, from, to, eFlagFilter, offset, + count, withDelete, dropIfEmpty); + } + + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] from, byte[] to, ElementFlagFilter eFlagFilter, + int offset, int count, boolean withDelete, boolean dropIfEmpty, + Transcoder tc) { + return this.getClient().asyncBopGet(key, from, to, eFlagFilter, offset, + count, withDelete, dropIfEmpty, tc); + } + + @Override + public CollectionFuture asyncBopDelete(String key, byte[] from, + byte[] to, ElementFlagFilter eFlagFilter, int count, + boolean dropIfEmpty) { + return this.getClient().asyncBopDelete(key, from, to, eFlagFilter, + count, dropIfEmpty); + } + + @Override + public CollectionFuture asyncBopDelete(String key, byte[] bkey, + ElementFlagFilter eFlagFilter, boolean dropIfEmpty) { + return this.getClient().asyncBopDelete(key, bkey, eFlagFilter, + dropIfEmpty); + } + + @Override + public CollectionFuture asyncBopUpsert(String key, byte[] bkey, + byte[] elementFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopUpsert(key, bkey, elementFlag, value, + attributesForCreate); + } + + @Override + public CollectionFuture asyncBopUpsert(String key, + byte[] bkey, byte[] elementFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopUpsert(key, bkey, elementFlag, value, + attributesForCreate, tc); + } + + @Override + public CollectionFuture asyncBopGetItemCount(String key, + byte[] from, byte[] to, ElementFlagFilter eFlagFilter) { + return this.getClient() + .asyncBopGetItemCount(key, from, to, eFlagFilter); + } + + @Override + public CollectionFuture 
asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, Object value) { + return this.getClient().asyncBopUpdate(key, bkey, eFlagUpdate, value); + } + + @Override + public CollectionFuture asyncBopUpdate(String key, long bkey, + ElementFlagUpdate eFlagUpdate, T value, Transcoder tc) { + return this.getClient().asyncBopUpdate(key, bkey, eFlagUpdate, value, + tc); + } + + @Override + public CollectionFuture asyncBopUpdate(String key, byte[] bkey, + ElementFlagUpdate eFlagUpdate, Object value) { + return this.getClient().asyncBopUpdate(key, bkey, eFlagUpdate, value); + } + + @Override + public CollectionFuture asyncBopUpdate(String key, + byte[] bkey, ElementFlagUpdate eFlagUpdate, T value, + Transcoder tc) { + return this.getClient().asyncBopUpdate(key, bkey, eFlagUpdate, value, + tc); + } + + @Override + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements) { + return this.getClient().asyncBopPipedUpdateBulk(key, elements); + } + + @Override + public CollectionFuture> asyncBopPipedUpdateBulk( + String key, List> elements, Transcoder tc) { + return this.getClient().asyncBopPipedUpdateBulk(key, elements, tc); + } + + @Override + public CollectionFuture> asyncSopPipedExistBulk( + String key, List values) { + return this.getClient().asyncSopPipedExistBulk(key, values); + } + + @Override + public CollectionFuture> asyncSopPipedExistBulk( + String key, List values, Transcoder tc) { + return this.getClient().asyncSopPipedExistBulk(key, values, tc); + } + + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopPipedInsertBulk(key, elements, + attributesForCreate); + } + + @Override + public CollectionFuture> asyncBopPipedInsertBulk( + String key, List> elements, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopPipedInsertBulk(key, elements, + attributesForCreate, tc); + } 
+ + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, + boolean withDelete, boolean dropIfEmpty) { + return this.getClient().asyncBopGet(key, bkey, eFlagFilter, withDelete, + dropIfEmpty); + } + + @Override + public CollectionFuture>> asyncBopGet( + String key, byte[] bkey, ElementFlagFilter eFlagFilter, + boolean withDelete, boolean dropIfEmpty, Transcoder tc) { + return this.getClient().asyncBopGet(key, bkey, eFlagFilter, withDelete, + dropIfEmpty, tc); + } + + @Override + public SMGetFuture>> asyncBopSortMergeGet( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count) { + return this.getClient().asyncBopSortMergeGet(keyList, from, to, + eFlagFilter, offset, count); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate); + } + + @Override + public Future> asyncBopInsertBulk( + List keyList, byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate, Transcoder tc) { + return this.getClient().asyncBopInsertBulk(keyList, bkey, eFlag, value, + attributesForCreate, tc); + } + + @Override + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count) { + return this.getClient().asyncBopGetBulk(keyList, from, to, eFlagFilter, + offset, count); + } + + @Override + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, byte[] from, byte[] to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc) { + return this.getClient().asyncBopGetBulk(keyList, from, to, eFlagFilter, + offset, count, tc); + } + + @Override + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int 
count) { + return this.getClient().asyncBopGetBulk(keyList, from, to, eFlagFilter, + offset, count); + } + + @Override + public CollectionGetBulkFuture>> asyncBopGetBulk( + List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count, + Transcoder tc) { + return this.getClient().asyncBopGetBulk(keyList, from, to, eFlagFilter, + offset, count, tc); + } + + @Override + public CollectionFuture asyncBopIncr(String key, long subkey, int by) { + return this.getClient().asyncBopIncr(key, subkey, by); + } + + @Override + public CollectionFuture asyncBopIncr(String key, byte[] subkey, int by) { + return this.getClient().asyncBopIncr(key, subkey, by); + } + + @Override + public CollectionFuture asyncBopDecr(String key, long subkey, int by) { + return this.getClient().asyncBopIncr(key, subkey, by); + } + + @Override + public CollectionFuture asyncBopDecr(String key, byte[] subkey, int by) { + return this.getClient().asyncBopIncr(key, subkey, by); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos) { + return this.getClient().asyncBopGetByPosition(key, order, pos); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int pos, Transcoder tc) { + return this.getClient().asyncBopGetByPosition(key, order, pos, tc); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to) { + return this.getClient().asyncBopGetByPosition(key, order, from, to); + } + + @Override + public CollectionFuture>> asyncBopGetByPosition( + String key, BTreeOrder order, int from, int to, Transcoder tc) { + return this.getClient().asyncBopGetByPosition(key, order, from, to, tc); + } + + @Override + public CollectionFuture asyncBopFindPosition(String key, + long longBKey, BTreeOrder order) { + return this.getClient().asyncBopFindPosition(key, longBKey, order); + } + + @Override + public CollectionFuture 
asyncBopFindPosition(String key, + byte[] byteArrayBKey, BTreeOrder order) { + return this.getClient().asyncBopFindPosition(key, byteArrayBKey, order); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + return this.getClient().asyncBopInsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate, transcoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopInsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate); + } + + @Override + public BTreeStoreAndGetFuture asyncBopInsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + return this.getClient().asyncBopInsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate, transcoder); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopUpsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, long bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + return this.getClient().asyncBopUpsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate, transcoder); + } + + @Override + public BTreeStoreAndGetFuture 
asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, Object value, + CollectionAttributes attributesForCreate) { + return this.getClient().asyncBopUpsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate); + } + + @Override + public BTreeStoreAndGetFuture asyncBopUpsertAndGetTrimmed( + String key, byte[] bkey, byte[] eFlag, E value, + CollectionAttributes attributesForCreate, Transcoder transcoder) { + return this.getClient().asyncBopUpsertAndGetTrimmed(key, bkey, eFlag, + value, attributesForCreate, transcoder); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ArcusKetamaNodeLocator.java b/src/main/java/net/spy/memcached/ArcusKetamaNodeLocator.java new file mode 100644 index 000000000..39cf98926 --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusKetamaNodeLocator.java @@ -0,0 +1,238 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.util.ArcusKetamaNodeLocatorConfiguration; + +public class ArcusKetamaNodeLocator extends SpyObject implements NodeLocator { + + SortedMap ketamaNodes; + Collection allNodes; + + HashAlgorithm hashAlg; + ArcusKetamaNodeLocatorConfiguration config; + + Lock lock = new ReentrantLock(); + + public ArcusKetamaNodeLocator(List nodes, HashAlgorithm alg) { + this(nodes, alg, new ArcusKetamaNodeLocatorConfiguration()); + } + + public ArcusKetamaNodeLocator(List nodes, HashAlgorithm alg, + ArcusKetamaNodeLocatorConfiguration conf) { + super(); + allNodes = nodes; + hashAlg = alg; + ketamaNodes = new TreeMap(); + config = conf; + + int numReps = config.getNodeRepetitions(); + for (MemcachedNode node : nodes) { + // Ketama does some special work with md5 where it reuses chunks. 
+ if (alg == HashAlgorithm.KETAMA_HASH) { + updateHash(node, false); + } else { + for (int i = 0; i < numReps; i++) { + ketamaNodes.put( + hashAlg.hash(config.getKeyForNode(node, i)), node); + } + } + } + assert ketamaNodes.size() == numReps * nodes.size(); + } + + private ArcusKetamaNodeLocator(SortedMap smn, + Collection an, HashAlgorithm alg, + ArcusKetamaNodeLocatorConfiguration conf) { + super(); + ketamaNodes = smn; + allNodes = an; + hashAlg = alg; + config = conf; + } + + public Collection getAll() { + return allNodes; + } + + public MemcachedNode getPrimary(final String k) { + MemcachedNode rv = getNodeForKey(hashAlg.hash(k)); + assert rv != null : "Found no node for key " + k; + return rv; + } + + long getMaxKey() { + return ketamaNodes.lastKey(); + } + + MemcachedNode getNodeForKey(long hash) { + MemcachedNode rv; + + lock.lock(); + try { + if (!ketamaNodes.containsKey(hash)) { + // Java 1.6 adds a ceilingKey method, but I'm still stuck in 1.5 + // in a lot of places, so I'm doing this myself. + SortedMap tailMap = ketamaNodes + .tailMap(hash); + if (tailMap.isEmpty()) { + hash = ketamaNodes.firstKey(); + } else { + hash = tailMap.firstKey(); + } + } + rv = ketamaNodes.get(hash); + } catch (RuntimeException e) { + throw e; + } finally { + lock.unlock(); + } + return rv; + } + + public Iterator getSequence(String k) { + return new KetamaIterator(k, allNodes.size()); + } + + public NodeLocator getReadonlyCopy() { + SortedMap smn = new TreeMap( + ketamaNodes); + Collection an = new ArrayList( + allNodes.size()); + + lock.lock(); + try { + // Rewrite the values a copy of the map. + for (Map.Entry me : smn.entrySet()) { + me.setValue(new MemcachedNodeROImpl(me.getValue())); + } + // Copy the allNodes collection. 
+ for (MemcachedNode n : allNodes) { + an.add(new MemcachedNodeROImpl(n)); + } + } catch (RuntimeException e) { + throw e; + } finally { + lock.unlock(); + } + + return new ArcusKetamaNodeLocator(smn, an, hashAlg, config); + } + + public void update(Collection toAttach, + Collection toDelete) { + lock.lock(); + try { + // Add memcached nodes. + for (MemcachedNode node : toAttach) { + allNodes.add(node); + updateHash(node, false); + } + + // Remove memcached nodes. + for (MemcachedNode node : toDelete) { + allNodes.remove(node); + updateHash(node, true); + + try { + node.getSk().attach(null); + node.shutdown(); + } catch (IOException e) { + getLogger().error( + "Failed to shutdown the node : " + node.toString()); + node.setSk(null); + } + } + } catch (RuntimeException e) { + throw e; + } finally { + lock.unlock(); + } + } + + void updateHash(MemcachedNode node, boolean remove) { + // Ketama does some special work with md5 where it reuses chunks. + for (int i = 0; i < config.getNodeRepetitions() / 4; i++) { + byte[] digest = HashAlgorithm.computeMd5(config.getKeyForNode(node, + i)); + for (int h = 0; h < 4; h++) { + Long k = ((long) (digest[3 + h * 4] & 0xFF) << 24) + | ((long) (digest[2 + h * 4] & 0xFF) << 16) + | ((long) (digest[1 + h * 4] & 0xFF) << 8) + | (digest[h * 4] & 0xFF); + if (remove) { + ketamaNodes.remove(k); + config.removeNode(node); + } else { + ketamaNodes.put(k, node); + } + } + } + } + + class KetamaIterator implements Iterator { + + final String key; + long hashVal; + int remainingTries; + int numTries = 0; + + public KetamaIterator(final String k, final int t) { + super(); + hashVal = hashAlg.hash(k); + remainingTries = t; + key = k; + } + + private void nextHash() { + long tmpKey = hashAlg.hash((numTries++) + key); + // This echos the implementation of Long.hashCode() + hashVal += (int) (tmpKey ^ (tmpKey >>> 32)); + hashVal &= 0xffffffffL; /* truncate to 32-bits */ + remainingTries--; + } + + public boolean hasNext() { + return remainingTries > 
0; + } + + public MemcachedNode next() { + try { + return getNodeForKey(hashVal); + } finally { + nextHash(); + } + } + + public void remove() { + throw new UnsupportedOperationException("remove not supported"); + } + + } +} diff --git a/src/main/java/net/spy/memcached/ArcusMBeanServer.java b/src/main/java/net/spy/memcached/ArcusMBeanServer.java new file mode 100644 index 000000000..f4a8e1432 --- /dev/null +++ b/src/main/java/net/spy/memcached/ArcusMBeanServer.java @@ -0,0 +1,62 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.lang.management.ManagementFactory; + +import javax.management.MBeanServer; +import javax.management.ObjectName; + +public final class ArcusMBeanServer { + + private final MBeanServer mbserver; + + private static class SingletonHolder { + private final static ArcusMBeanServer INSTANCE = new ArcusMBeanServer(); + } + + private ArcusMBeanServer() { + mbserver = ManagementFactory.getPlatformMBeanServer(); + } + + public static ArcusMBeanServer getInstance() { + return SingletonHolder.INSTANCE; + } + + public boolean isRegistered(String name) { + try { + return mbserver != null + && mbserver.isRegistered(new ObjectName(name)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public void registMBean(Object o, String name) { + if (isRegistered(name)) { + return; + } + + if (mbserver != null) { + try { + mbserver.registerMBean(o, new ObjectName(name)); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/src/main/java/net/spy/memcached/ArrayModNodeLocator.java b/src/main/java/net/spy/memcached/ArrayModNodeLocator.java new file mode 100644 index 000000000..04286adad --- /dev/null +++ b/src/main/java/net/spy/memcached/ArrayModNodeLocator.java @@ -0,0 +1,125 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +/** + * NodeLocator implementation for dealing with simple array lookups using a + * modulus of the hash code and node list length. + */ +public final class ArrayModNodeLocator implements NodeLocator { + + final MemcachedNode[] nodes; + + private final HashAlgorithm hashAlg; + + /** + * Construct an ArraymodNodeLocator over the given array of nodes and + * using the given hash algorithm. + * + * @param n the array of nodes + * @param alg the hash algorithm + */ + public ArrayModNodeLocator(List n, HashAlgorithm alg) { + super(); + nodes=n.toArray(new MemcachedNode[n.size()]); + hashAlg=alg; + } + + private ArrayModNodeLocator(MemcachedNode[] n, HashAlgorithm alg) { + super(); + nodes=n; + hashAlg=alg; + } + + public Collection getAll() { + return Arrays.asList(nodes); + } + + public MemcachedNode getPrimary(String k) { + return nodes[getServerForKey(k)]; + } + + public Iterator getSequence(String k) { + return new NodeIterator(getServerForKey(k)); + } + + public NodeLocator getReadonlyCopy() { + MemcachedNode[] n=new MemcachedNode[nodes.length]; + for(int i=0; i toAttach, Collection toDelete) { + throw new UnsupportedOperationException("update not supported"); + } + + private int getServerForKey(String key) { + int rv=(int)(hashAlg.hash(key) % nodes.length); + assert rv >= 0 : "Returned negative key for key " + key; + assert rv < nodes.length + : "Invalid server number " + rv + " for key " + key; + return rv; + } + + class NodeIterator implements Iterator { + + private final int start; + private int next=0; + + public NodeIterator(int keyStart) { + start=keyStart; + next=start; + computeNext(); + assert next >= 0 || nodes.length == 1 + : "Starting sequence at " + start + " of " + + nodes.length + " next is " + next; + } + + public boolean hasNext() { + return next >= 0; + } + + private void computeNext() { + if(++next >= 
nodes.length) { + next=0; + } + if(next == start) { + next=-1; + } + } + + public MemcachedNode next() { + try { + return nodes[next]; + } finally { + computeNext(); + } + } + + public void remove() { + throw new UnsupportedOperationException("Can't remove a node"); + } + + } +} diff --git a/src/main/java/net/spy/memcached/BaseCacheMap.java b/src/main/java/net/spy/memcached/BaseCacheMap.java new file mode 100644 index 000000000..15bfe5f4c --- /dev/null +++ b/src/main/java/net/spy/memcached/BaseCacheMap.java @@ -0,0 +1,126 @@ +package net.spy.memcached; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +import net.spy.memcached.transcoders.Transcoder; + +/** + * Base class for a Map interface to memcached. + * + *

+ * This Map interface makes memcached a bit easier to use for some purposes + * by providing a limited Map implementation. + *

+ * + *

+ * Do note that nothing that iterates over the map will work (such is + * memcached). All iteration mechanisms will return empty iterators and + * such. + *

+ * + * @param the type of value taken and returned by this Map's underlying + * transcoder, and thus taken and returned by this Map. + */ +public class BaseCacheMap implements Map { + + private final String keyPrefix; + private final Transcoder transcoder; + private final MemcachedClientIF client; + private final int exp; + + /** + * Build a BaseCacheMap. + * + * @param c the underlying client + * @param expiration the expiration for objects set through this Map + * @param prefix a prefix to ensure objects in this map are unique + * @param t the transcoder to serialize and deserialize objects + */ + public BaseCacheMap(MemcachedClientIF c, int expiration, + String prefix, Transcoder t) { + super(); + keyPrefix = prefix; + transcoder = t; + client = c; + exp = expiration; + } + + public void clear() { + // TODO: Support a rolling key generation. + throw new UnsupportedOperationException(); + } + + private String getKey(String k) { + return keyPrefix + k; + } + + public boolean containsKey(Object key) { + return get(key) != null; + } + + /** + * This method always returns false, as truth cannot be determined without + * iteration. + */ + public boolean containsValue(Object value) { + return false; + } + + public Set> entrySet() { + return Collections.emptySet(); + } + + public V get(Object key) { + V rv = null; + try { + rv = client.get(getKey((String)key), transcoder); + } catch(ClassCastException e) { + // Most likely, this is because the key wasn't a String. + // Either way, it's a no. + } + return rv; + } + + public boolean isEmpty() { + return false; + } + + public Set keySet() { + return Collections.emptySet(); + } + + public void putAll(Map t) { + for(Map.Entry me : t.entrySet()) { + client.set(getKey(me.getKey()), exp, me.getValue()); + } + } + + public V remove(Object key) { + V rv = null; + try { + rv = get(key); + client.delete(getKey((String)key)); + } catch(ClassCastException e) { + // Not a string key. Ignore. 
+ } + return rv; + } + + public int size() { + return 0; + } + + public Collection values() { + return Collections.emptySet(); + } + + public V put(String key, V value) { + V rv = get(key); + client.set(getKey(key), exp, value); + return rv; + } + +} diff --git a/src/main/java/net/spy/memcached/BinaryConnectionFactory.java b/src/main/java/net/spy/memcached/BinaryConnectionFactory.java new file mode 100644 index 000000000..3e1203edf --- /dev/null +++ b/src/main/java/net/spy/memcached/BinaryConnectionFactory.java @@ -0,0 +1,57 @@ +package net.spy.memcached; + +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; + +import net.spy.memcached.protocol.binary.BinaryMemcachedNodeImpl; +import net.spy.memcached.protocol.binary.BinaryOperationFactory; + +/** + * Default connection factory for binary wire protocol connections. + */ +public class BinaryConnectionFactory extends DefaultConnectionFactory { + + /** + * Create a DefaultConnectionFactory with the default parameters. + */ + public BinaryConnectionFactory() { + super(); + } + + /** + * Create a BinaryConnectionFactory with the given maximum operation + * queue length, and the given read buffer size. + */ + public BinaryConnectionFactory(int len, int bufSize) { + super(len, bufSize); + } + + /** + * Construct a BinaryConnectionFactory with the given parameters. + * + * @param len the queue length. 
+ * @param bufSize the buffer size + * @param hash the algorithm to use for hashing + */ + public BinaryConnectionFactory(int len, int bufSize, HashAlgorithm hash) { + super(len, bufSize, hash); + } + + @Override + public MemcachedNode createMemcachedNode(SocketAddress sa, + SocketChannel c, int bufSize) { + boolean doAuth = false; + return new BinaryMemcachedNodeImpl(sa, c, bufSize, + createReadOperationQueue(), + createWriteOperationQueue(), + createOperationQueue(), + getOpQueueMaxBlockTime(), + doAuth); + } + + @Override + public OperationFactory getOperationFactory() { + return new BinaryOperationFactory(); + } + +} diff --git a/src/main/java/net/spy/memcached/BroadcastOpFactory.java b/src/main/java/net/spy/memcached/BroadcastOpFactory.java new file mode 100644 index 000000000..28e158ac9 --- /dev/null +++ b/src/main/java/net/spy/memcached/BroadcastOpFactory.java @@ -0,0 +1,17 @@ +package net.spy.memcached; + +import java.util.concurrent.CountDownLatch; + +import net.spy.memcached.ops.Operation; + +/** + * Factory for creating Operations to be broadcast. + */ +public interface BroadcastOpFactory { + + /** + * Construct a new operation for delivery to the given node. + * Each operation should count the given latch down upon completion. + */ + Operation newOp(MemcachedNode n, CountDownLatch latch); +} diff --git a/src/main/java/net/spy/memcached/BulkService.java b/src/main/java/net/spy/memcached/BulkService.java new file mode 100644 index 000000000..f3cdcb4d2 --- /dev/null +++ b/src/main/java/net/spy/memcached/BulkService.java @@ -0,0 +1,278 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.internal.BasicThreadFactory; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.StoreType; +import net.spy.memcached.transcoders.Transcoder; + +class BulkService extends SpyObject { + + private static int DEFAULT_LOOP_LIMIT; + private final ExecutorService executor; + private final long singleOpTimeout; + + BulkService(int loopLimit, int threadCount, long singleOpTimeout) { + this.executor = new ThreadPoolExecutor(threadCount, threadCount, 60L, + TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), + new BasicThreadFactory("bulk-service", true), + new ThreadPoolExecutor.AbortPolicy()); + BulkService.DEFAULT_LOOP_LIMIT = loopLimit; + this.singleOpTimeout = singleOpTimeout; + } + + Future> setBulk( + List keys, int exp, T value, Transcoder transcoder, + ArcusClient[] client) { + assert !executor.isShutdown() 
: "Pool has already shut down."; + BulkSetWorker w = new BulkSetWorker(keys, exp, value, transcoder, + client, singleOpTimeout); + BulkService.Task> task = new BulkService.Task>( + w); + executor.submit(task); + return task; + } + + Future> setBulk( + Map o, int exp, Transcoder transcoder, + ArcusClient[] client) { + assert !executor.isShutdown() : "Pool has already shut down."; + BulkSetWorker w = new BulkSetWorker(o, exp, transcoder, client, + singleOpTimeout); + BulkService.Task> task = new BulkService.Task>( + w); + executor.submit(task); + return task; + } + + void shutdown() { + try { + executor.shutdown(); + } catch (Exception e) { + getLogger().warn("exception while shutting down bulk set service.", + e); + } + } + + private static class Task extends FutureTask { + private final BulkWorker worker; + + public Task(BulkWorker w) { + super((Callable) w); + this.worker = w; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return worker.cancel() && super.cancel(mayInterruptIfRunning); + } + } + + /** + * Bulk operation worker + */ + private abstract static class BulkWorker extends SpyObject implements + Callable> { + + protected final ArcusClient[] clientList; + protected final Future[] future; + protected final long operationTimeout; + protected final AtomicBoolean isRunnable = new AtomicBoolean(true); + protected final Map errorList; + + protected final int totalCount; + protected final int fromIndex; + protected final int toIndex; + + public BulkWorker(int keySize, long timeout, Transcoder tc, + ArcusClient[] clientList) { + this.future = new Future[keySize]; + this.operationTimeout = timeout; + this.clientList = getOptimalClients(clientList); + this.errorList = new HashMap(); + + fromIndex = 0; + toIndex = keySize - 1; + totalCount = toIndex - fromIndex + 1; + } + + public boolean cancel() { + if (!isRunnable()) { + return false; + } + + isRunnable.set(false); + + boolean ret = true; + + for (Future f : future) { + if (f == null) 
{ + continue; + } + if (f.isCancelled() || f.isDone()) { + continue; + } + ret &= f.cancel(true); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Cancel the future. " + f); + } + } + getLogger().info("Cancel, bulk set worker."); + return ret; + } + + private ArcusClient[] getOptimalClients(ArcusClient[] clientList) { + return clientList; + } + + protected boolean isRunnable() { + return isRunnable.get() && !Thread.currentThread().isInterrupted(); + } + + protected void setErrorOpStatus(String key, int indexOfFuture) { + errorList.put(key, + ((CollectionFuture) future[indexOfFuture]) + .getOperationStatus()); + } + + public abstract Future processItem(int index); + + public abstract void awaitProcessResult(int index); + + public abstract boolean isDataExists(); + + public Map call() throws Exception { + if (!isDataExists()) { + return errorList; + } + + for (int pos = fromIndex; isRunnable() && pos <= toIndex; pos++) { + if ((pos - fromIndex) > 0 + && (pos - fromIndex) % DEFAULT_LOOP_LIMIT == 0) { + for (int i = pos - DEFAULT_LOOP_LIMIT; isRunnable() + && i < pos; i++) { + awaitProcessResult(i); + } + } + try { + if (isRunnable()) { + future[pos] = processItem(pos); + } + } catch (IllegalStateException e) { + if (Thread.currentThread().isInterrupted()) { + break; + } else { + throw e; + } + } + } + for (int i = toIndex + - (totalCount % DEFAULT_LOOP_LIMIT == 0 ? 
DEFAULT_LOOP_LIMIT + : totalCount % DEFAULT_LOOP_LIMIT) + 1; isRunnable() + && i <= toIndex; i++) { + awaitProcessResult(i); + } + return errorList; + } + } + + /** + * Bulk set operation worker + */ + private static class BulkSetWorker extends BulkWorker { + private final List keys; + private final int exp; + private final int cntCos; + private List cos; + + public BulkSetWorker(List keys, int exp, T value, + Transcoder transcoder, ArcusClient[] clientList, + long timeout) { + super(keys.size(), timeout, transcoder, clientList); + this.keys = keys; + this.exp = exp; + this.cos = new ArrayList(); + this.cos.add(transcoder.encode(value)); + this.cntCos = 1; + } + + public BulkSetWorker(Map o, int exp, + Transcoder transcoder, ArcusClient[] clientList, long timeout) { + + super(o.keySet().size(), timeout, transcoder, clientList); + + this.keys = new ArrayList(o.keySet()); + this.exp = exp; + + this.cos = new ArrayList(); + for (String key : keys) { + this.cos.add(transcoder.encode(o.get(key))); + } + this.cntCos = this.cos.size(); + } + + @Override + public Future processItem(int index) { + return clientList[index % clientList.length].asyncStore( + StoreType.set, keys.get(index), exp, + (this.cntCos > 1 ? 
cos.get(index) : cos.get(0))); + } + + @Override + public void awaitProcessResult(int index) { + try { + boolean success = future[index].get(operationTimeout, + TimeUnit.MILLISECONDS); + if (!success) { + errorList.put( + keys.get(index), + new CollectionOperationStatus(false, String + .valueOf(success), CollectionResponse.END)); + } + } catch (Exception e) { + future[index].cancel(true); + errorList.put(keys.get(index), new CollectionOperationStatus( + false, e.getMessage(), CollectionResponse.EXCEPTION)); + } + } + + @Override + public boolean isDataExists() { + return (keys != null && keys.size() > 0); + } + } +} diff --git a/src/main/java/net/spy/memcached/CASMutation.java b/src/main/java/net/spy/memcached/CASMutation.java new file mode 100644 index 000000000..8b85cce8b --- /dev/null +++ b/src/main/java/net/spy/memcached/CASMutation.java @@ -0,0 +1,15 @@ +package net.spy.memcached; + +/** + * Defines a mutation mechanism for a high-level CAS client interface. + */ +public interface CASMutation { + + /** + * Get the new value to replace the current value. + * + * @param current the current value in the cache + * @return the replacement value + */ + T getNewValue(T current); +} diff --git a/src/main/java/net/spy/memcached/CASMutator.java b/src/main/java/net/spy/memcached/CASMutator.java new file mode 100644 index 000000000..2b2e1e57c --- /dev/null +++ b/src/main/java/net/spy/memcached/CASMutator.java @@ -0,0 +1,129 @@ +package net.spy.memcached; + +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Object that provides mutation via CAS over a given memcache client. + * + *

Example usage (reinventing incr):

+ * + *
+ * // Get or create a client.
+ * MemcachedClient client=[...];
+ *
+ * // Get a Transcoder.
+ * Transcoder tc = new LongTranscoder();
+ *
+ * // Get a mutator instance that uses that client.
+ * CASMutator<Long> mutator=new CASMutator<Long>(client, tc);
+ *
+ * // Get a mutation that knows what to do when a value is found.
+ * CASMutation<Long> mutation=new CASMutation<Long>() {
+ *     public Long getNewValue(Long current) {
+ *         return current + 1;
+ *     }
+ * };
+ *
+ * // Do a mutation.
+ * long currentValue=mutator.cas(someKey, 0L, 0, mutation);
+ * 
+ */ +public class CASMutator extends SpyObject { + + private static final int MAX_TRIES=8192; + + private final MemcachedClientIF client; + private final Transcoder transcoder; + private final int max; + + /** + * Construct a CASMutator that uses the given client. + * + * @param c the client + * @param tc the Transcoder to use + * @param max_tries the maximum number of attempts to get a CAS to succeed + */ + public CASMutator(MemcachedClientIF c, Transcoder tc, int max_tries) { + super(); + client=c; + transcoder=tc; + max=max_tries; + } + + /** + * Construct a CASMutator that uses the given client. + * + * @param c the client + * @param tc the Transcoder to use + */ + public CASMutator(MemcachedClientIF c, Transcoder tc) { + this(c, tc, MAX_TRIES); + } + + /** + * CAS a new value in for a key. + * + *

+ * Note that if initial is null, this method will only update existing + * values. + *

+ * + * @param key the key to be CASed + * @param initial the value to use when the object is not cached + * @param initialExp the expiration time to use when initializing + * @param m the mutation to perform on an object if a value exists for the + * key + * @return the new value that was set + */ + public T cas(final String key, final T initial, int initialExp, + final CASMutation m) throws Exception { + T rv=initial; + + boolean done=false; + for(int i=0; !done && i casval=client.gets(key, transcoder); + T current=null; + // If there were a CAS value, check to see if it's compatible. + if(casval != null) { + T tmp = casval.getValue(); + current=tmp; + } + // If we have anything mutate and CAS, else add. + if(current != null) { + // Declaring this impossible since the only way current can + // be non-null is if casval was set. + assert casval != null : "casval was null with a current value"; + + rv=m.getNewValue(current); + // There are three possibilities here: + // 1) It worked and we're done. + // 2) It collided and we need to reload and try again. + // 3) It disappeared between our fetch and our cas. + // We're ignoring #3 because it's *extremely* unlikely and the + // behavior will be fine in this code -- we'll do another gets + // and follow it up with either an add or another cas depending + // on whether it exists the next time. + if(client.cas(key, casval.getCas(), rv, transcoder) + == CASResponse.OK) { + done=true; + } + } else { + // No value found, try an add. 
+ if(initial == null) { + done = true; + rv = null; + } else if(client.add(key, initialExp, initial, transcoder).get()) { + done=true; + rv=initial; + } + } + } + if(!done) { + throw new RuntimeException("Couldn't get a CAS in " + max + + " attempts"); + } + + return rv; + } +} diff --git a/src/main/java/net/spy/memcached/CASResponse.java b/src/main/java/net/spy/memcached/CASResponse.java new file mode 100644 index 000000000..bc6b20c49 --- /dev/null +++ b/src/main/java/net/spy/memcached/CASResponse.java @@ -0,0 +1,23 @@ +package net.spy.memcached; + +/** + * Response codes for a CAS operation. + */ +public enum CASResponse { + /** + * Status indicating that the CAS was successful and the new value is + * stored in the cache. + */ + OK, + /** + * Status indicating the value was not found in the cache (an add + * operation may be issued to store the value). + */ + NOT_FOUND, + /** + * Status indicating the value was found in the cache, but exists with a + * different CAS value than expected. In this case, the value must be + * refetched and the CAS operation tried again. + */ + EXISTS +} diff --git a/src/main/java/net/spy/memcached/CASValue.java b/src/main/java/net/spy/memcached/CASValue.java new file mode 100644 index 000000000..b5ec70fec --- /dev/null +++ b/src/main/java/net/spy/memcached/CASValue.java @@ -0,0 +1,41 @@ +package net.spy.memcached; + +/** + * A value with a CAS identifier. + */ +public class CASValue { + private final long cas; + private final T value; + + /** + * Construct a new CASValue with the given identifer and value. + * + * @param c the CAS identifier + * @param v the value + */ + public CASValue(long c, T v) { + super(); + cas=c; + value=v; + } + + /** + * Get the CAS identifier. + */ + public long getCas() { + return cas; + } + + /** + * Get the object value. 
+ */ + public T getValue() { + return value; + } + + @Override + public String toString() { + return "{CasValue " + cas + "/" + value + "}"; + } + +} diff --git a/src/main/java/net/spy/memcached/CacheManager.java b/src/main/java/net/spy/memcached/CacheManager.java new file mode 100644 index 000000000..1e1be4658 --- /dev/null +++ b/src/main/java/net/spy/memcached/CacheManager.java @@ -0,0 +1,362 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +/** + * A program to use CacheMonitor to start and + * stop memcached node based on a znode. The program watches the + * specified znode and saves the znode that corresponds to the + * memcached server in the remote machine. 
It also changes the + * previous ketama node + */ +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.UnknownHostException; +import java.text.SimpleDateFormat; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.ArcusClientException.InitializeClientException; +import net.spy.memcached.compat.SpyThread; + +import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.ZooDefs.Ids; + +public class CacheManager extends SpyThread implements Watcher, + CacheMonitor.CacheMonitorListener { + public static final String CACHE_LIST_PATH = "/arcus/cache_list/"; + + public static final String CLIENT_INFO_PATH = "/arcus/client_list/"; + + private static final int SESSION_TIMEOUT = 15000; + + private static final long ZK_CONNECT_TIMEOUT = 2000L; + + private final String hostPort; + + private final String serviceCode; + + private CacheMonitor cacheMonitor; + + private ZooKeeper zk; + + private ArcusClient[] client; + + private final CountDownLatch clientInitLatch; + + private final ConnectionFactoryBuilder cfb; + + private final int waitTimeForConnect; + + private final int poolSize; + + private volatile boolean shutdownRequested = false; + + private CountDownLatch zkInitLatch; + + public CacheManager(String hostPort, String serviceCode, + ConnectionFactoryBuilder cfb, CountDownLatch clientInitLatch, int poolSize, + int waitTimeForConnect) { + + this.hostPort = hostPort; + this.serviceCode = serviceCode; + this.cfb = cfb; + this.clientInitLatch = clientInitLatch; + this.poolSize = poolSize; + this.waitTimeForConnect = waitTimeForConnect; + + initZooKeeperClient(); + + setName("Cache Manager IO for " + serviceCode + "@" + 
hostPort); + setDaemon(true); + start(); + + getLogger().info( + "CacheManager started. (" + serviceCode + "@" + hostPort + ")"); + + } + + private void initZooKeeperClient() { + try { + getLogger().info("Trying to connect to Arcus admin(%s@%s)", serviceCode, hostPort); + + zkInitLatch = new CountDownLatch(1); + zk = new ZooKeeper(hostPort, SESSION_TIMEOUT, this); + + try { + zkInitLatch.await(ZK_CONNECT_TIMEOUT, TimeUnit.MILLISECONDS); + + if (zk.exists(CacheManager.CACHE_LIST_PATH + serviceCode, false) == null) { + getLogger().fatal( + "Service code not found. (" + serviceCode + ")"); + throw new NotExistsServiceCodeException(serviceCode); + } + + String path = getClientInfo(); + if (path.isEmpty()) { + getLogger().fatal( + "Can't create the znode of client info (" + path + + ")"); + throw new InitializeClientException( + "Can't initialize Arcus client."); + } + + if (zk.exists(path, false) == null) { + zk.create(path, null, Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL); + } + } catch (NotExistsServiceCodeException e) { + shutdownZooKeeperClient(); + throw e; + } catch (InterruptedException ie) { + getLogger().fatal("Can't connect to Arcus admin(%s@%s) %s", serviceCode, hostPort, ie.getMessage()); + shutdownZooKeeperClient(); + return; + } catch (Exception e) { + getLogger().fatal( + "Unexpected exception. 
contact to Arcus administrator"); + + shutdownZooKeeperClient(); + throw new InitializeClientException( + "Can't initialize Arcus client.", e); + } + + cacheMonitor = new CacheMonitor(zk, serviceCode, this); + } catch (IOException e) { + throw new InitializeClientException( + "Can't initialize Arcus client.", e); + } + } + + private String getClientInfo() { + String path = ""; + + try { + SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyyMMddHHmmss"); + Date currentTime = new Date(); + + // create the ephemeral znode + // "/arcus/client_list/{service_code}/{client hostname}_{ip address}_{pool size}_java_{client version}_{YYYYMMDDHHIISS}_{zk session id}" + path = CLIENT_INFO_PATH + serviceCode + "/" + + InetAddress.getLocalHost().getHostName() + "_" + + InetAddress.getLocalHost().getHostAddress() + "_" + + this.poolSize + + "_java_" + + ArcusClient.VERSION + "_" + + simpleDateFormat.format(currentTime) + "_" + + zk.getSessionId(); + + } catch (UnknownHostException e) { + return null; + } + + return path; + } + + /*************************************************************************** + * We do process only child node change event ourselves, we just need to + * forward them on. + * + * @see org.apache.zookeeper.Watcher#process(org.apache.zookeeper.proto.WatcherEvent) + */ + public void process(WatchedEvent event) { + if (event.getType() == Event.EventType.None) { + switch (event.getState()) { + case SyncConnected: + getLogger().info("Connected to Arcus admin. (%s@%s)", serviceCode, hostPort); + zkInitLatch.countDown(); + } + } + + if (cacheMonitor != null) { + cacheMonitor.process(event); + } else { + getLogger().debug( + "cm is null, servicecode : %s, state:%s, type:%s", + serviceCode, event.getState(), event.getType()); + } + } + + public void run() { + try { + synchronized (this) { + while (!shutdownRequested) { + if (zk == null) { + getLogger().info("Arcus admin connection is not established. 
(%s@%s)", serviceCode, hostPort); + initZooKeeperClient(); + } + + if (!cacheMonitor.dead) { + wait(); + } else { + getLogger().warn("Unexpected disconnection from Arcus admin. Trying to reconnect to Arcus admin."); + try { + shutdownZooKeeperClient(); + initZooKeeperClient(); + } catch (NotExistsServiceCodeException e) { + Thread.sleep(5000L); + } catch (InitializeClientException e) { + Thread.sleep(5000L); + } + } + } + } + } catch (InterruptedException e) { + getLogger().warn("current arcus admin is interrupted : %s", + e.getMessage()); + } finally { + shutdownZooKeeperClient(); + } + } + + public void closing() { + synchronized (this) { + notifyAll(); + } + } + + /** + * Change current MemcachedNodes to new MemcachedNodes but intersection of + * current and new will be ruled out. + * + * @param children + * new children node list + */ + public void commandNodeChange(List children) { + String addrs = ""; + for (int i = 0; i < children.size(); i++) { + String[] temp = children.get(i).split("-"); + if (i != 0) { + addrs = addrs + "," + temp[0]; + } else { + addrs = temp[0]; + } + } + + if (client == null) { + createArcusClient(addrs); + return; + } + + for (ArcusClient ac : client) { + MemcachedConnection conn = ac.getMemcachedConnection(); + conn.putMemcachedQueue(addrs); + conn.getSelector().wakeup(); + } + } + + /** + * Create a ArcusClient + * + * @param addrs + * current available Memcached Addresses + */ + private void createArcusClient(String addrs) { + + List socketList = AddrUtil.getAddresses(addrs); + + final CountDownLatch latch = new CountDownLatch(socketList.size()); + final ConnectionObserver observer = new ConnectionObserver() { + + @Override + public void connectionLost(SocketAddress sa) { + + } + + @Override + public void connectionEstablished(SocketAddress sa, + int reconnectCount) { + latch.countDown(); + } + }; + + cfb.setInitialObservers(Collections.singleton(observer)); + + int _awaitTime = 0; + if (waitTimeForConnect == 0) + _awaitTime = 50 
* socketList.size(); + else + _awaitTime = waitTimeForConnect; + + client = new ArcusClient[poolSize]; + for (int i = 0; i < poolSize; i++) { + try { + client[i] = ArcusClient.getInstance(cfb.build(), socketList); + client[i].setName("Memcached IO for " + serviceCode); + client[i].setCacheManager(this); + } catch (IOException e) { + getLogger() + .fatal("Arcus Connection has critical problems. contact arcus manager."); + } + } + try { + if (latch.await(_awaitTime, TimeUnit.MILLISECONDS)) { + getLogger().warn("All arcus connections are established."); + } else { + getLogger() + .error("Some arcus connections are not established."); + } + // Success signal for initial connections to Zookeeper and + // Memcached. + } catch (InterruptedException e) { + getLogger() + .fatal("Arcus Connection has critical problems. contact arcus manager."); + } + this.clientInitLatch.countDown(); + + } + + /** + * Returns current ArcusClient + * + * @return current ArcusClient + */ + public ArcusClient[] getAC() { + return client; + } + + private void shutdownZooKeeperClient() { + if (zk == null) { + return; + } + + try { + getLogger().info("Close the ZooKeeper client. serviceCode=" + serviceCode + ", adminSessionId=0x" + Long.toHexString(zk.getSessionId())); + zk.close(); + zk = null; + } catch (InterruptedException e) { + getLogger().warn( + "An exception occured while closing ZooKeeper client.", e); + } + } + + public void shutdown() { + if (!shutdownRequested) { + getLogger().info("Shut down cache manager."); + shutdownRequested = true; + closing(); + } + } +} diff --git a/src/main/java/net/spy/memcached/CacheMap.java b/src/main/java/net/spy/memcached/CacheMap.java new file mode 100644 index 000000000..3feaaf6dc --- /dev/null +++ b/src/main/java/net/spy/memcached/CacheMap.java @@ -0,0 +1,40 @@ +package net.spy.memcached; + +/** + * A Map interface to memcached. + * + *

+ * Do note that nothing that iterates over the map will work (such is + * memcached). All iteration mechanisms will return empty iterators and + * such. + *

+ */ +public class CacheMap extends BaseCacheMap { + + /** + * Construct a CacheMap over the given MemcachedClient. + * + * @param c the client + * @param expiration the expiration to set for keys written to the cache + * @param prefix a prefix used to make keys in this map unique + */ + public CacheMap(MemcachedClientIF c, int expiration, String prefix) { + super(c, expiration, prefix, c.getTranscoder()); + } + + /** + * Construct a CacheMap over the given MemcachedClient with no expiration. + * + *

+ * Keys written into this Map will only expire when the LRU pushes them + * out. + *

+ * + * @param c the client + * @param prefix a prefix used to make keys in this map unique + */ + public CacheMap(MemcachedClientIF c, String prefix) { + this(c, 0, prefix); + } + +} diff --git a/src/main/java/net/spy/memcached/CacheMonitor.java b/src/main/java/net/spy/memcached/CacheMonitor.java new file mode 100644 index 000000000..48c40dddf --- /dev/null +++ b/src/main/java/net/spy/memcached/CacheMonitor.java @@ -0,0 +1,203 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.List; + +import net.spy.memcached.compat.SpyObject; + +import org.apache.zookeeper.AsyncCallback.ChildrenCallback; +import org.apache.zookeeper.KeeperException.Code; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.ZooKeeper; + +/** + * CacheMonitor monitors the changes of the cache server list + * in the ZooKeeper node(/arcus/cache_list/). + */ +public class CacheMonitor extends SpyObject implements Watcher, + ChildrenCallback { + + ZooKeeper zk; + + String serviceCode; + + volatile boolean dead; + + CacheMonitorListener listener; + + List prevChildren; + + /** + * The locator class of the spymemcached has an assumption + * that it should have one cache node at least. + * Thus, we add a fake server node in it + * if there's no cache servers for the given service code. 
+ * This is just a work-around, but it works really. + */ + public static final String FAKE_SERVER_NODE = "0.0.0.0:23456"; + + /** + * Constructor + * + * @param zk + * ZooKeeper connection + * @param znode + * A node to be watched + * @param latch + * Connection establishment checker + * @param listener + * Callback listener + */ + public CacheMonitor(ZooKeeper zk, String serviceCode, + CacheMonitorListener listener) { + this.zk = zk; + this.serviceCode = serviceCode; + this.listener = listener; + + getLogger().info("Initializing the CacheMonitor."); + + // Get the cache list from the Arcus admin asynchronously. + // Returning list would be processed in processResult(). + asyncGetCacheList(); + } + + /** + * Other classes use the CacheMonitor by implementing this method + */ + public interface CacheMonitorListener { + /** + * The existing children of the node has changed. + */ + void commandNodeChange(List children); + + /** + * The ZooKeeper session is no longer valid. + */ + void closing(); + } + + /** + * Processes every events from the ZooKeeper. + */ + public void process(WatchedEvent event) { + if (event.getType() == Event.EventType.None) { + // Processes session events + switch (event.getState()) { + case SyncConnected: + getLogger().warn("Reconnected to the Arcus admin. " + getInfo()); + return; + case Disconnected: + getLogger().warn("Disconnected from the Arcus admin. Trying to reconnect. " + getInfo()); + return; + case Expired: + // If the session was expired, just shutdown this client to be re-initiated. + getLogger().warn("Session expired. Trying to reconnect to the Arcus admin." + getInfo()); + shutdown(); + return; + } + } else { + // Set a new watch on the znode when there are any changes in it. + if (event.getType() == Event.EventType.NodeChildrenChanged) { + asyncGetCacheList(); + } + } + } + + /** + * A callback function to process the result of getChildren(watch=true). 
+ */ + public void processResult(int rc, String path, Object ctx, + List children) { + switch (Code.get(rc)) { + case OK: + commandNodeChange(children); + return; + case NONODE: + getLogger().fatal("Cannot find your service code. Please contact Arcus support to solve this problem. " + getInfo()); + return; + case SESSIONEXPIRED: + getLogger().warn("Session expired. Trying to reconnect to the Arcus admin. " + getInfo()); + shutdown(); + return; + case NOAUTH: + getLogger().fatal("Authorization failed " + getInfo()); + shutdown(); + return; + case CONNECTIONLOSS: + getLogger().warn("Connection lost. Trying to reconnect to the Arcus admin." + getInfo()); + asyncGetCacheList(); + return; + default: + getLogger().warn("Ignoring an unexpected event from the Arcus admin. code=" + Code.get(rc) + ", " + getInfo()); + asyncGetCacheList(); + return; + } + } + + /** + * Get the cache list asynchronously from the Arcus admin. + */ + void asyncGetCacheList() { + if (getLogger().isDebugEnabled()) { + getLogger().debug("Set a new watch on " + (CacheManager.CACHE_LIST_PATH + serviceCode)); + } + + zk.getChildren(CacheManager.CACHE_LIST_PATH + serviceCode, true, this, null); + } + + /** + * Let the CacheManager change the cache list. + * If there's no children in the znode, make a fake server node. + * @param children + */ + void commandNodeChange(List children) { + // If there's no children, add a fake server node to the list. + if (children.size() == 0) { + getLogger().error("Cannot find any cache nodes for your service code. Please contact Arcus support to solve this problem. " + getInfo()); + children.add(FAKE_SERVER_NODE); + } + + if (!children.equals(prevChildren)) { + getLogger().warn("Cache list has been changed : From=" + prevChildren + ", To=" + children + ", " + getInfo()); + } + + // Store the current children. + prevChildren = children; + + // Change the memcached node list. + listener.commandNodeChange(children); + } + + /** + * Shutdown the CacheMonitor. 
+ */ + public void shutdown() { + getLogger().info("Shutting down the CacheMonitor. " + getInfo()); + dead = true; + listener.closing(); + } + + private String getInfo() { + String zkSessionId = null; + if (zk != null) { + zkSessionId = "0x" + Long.toHexString(zk.getSessionId()); + } + return "[serviceCode=" + serviceCode + ", adminSessionId=" + zkSessionId + "]"; + } +} diff --git a/src/main/java/net/spy/memcached/CachedData.java b/src/main/java/net/spy/memcached/CachedData.java new file mode 100644 index 000000000..da2a4f46c --- /dev/null +++ b/src/main/java/net/spy/memcached/CachedData.java @@ -0,0 +1,58 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached; + +import java.util.Arrays; + +/** + * Cached data with its attributes. + */ +public final class CachedData { + + /** + * Maximum data size allowed by memcached. + */ + public static final int MAX_SIZE = 1024*1024; + + private final int flags; + private final byte[] data; + + /** + * Get a CachedData instance for the given flags and byte array. + * + * @param f the flags + * @param d the data + * @param max_size the maximum allowable size. + */ + public CachedData(int f, byte[] d, int max_size) { + super(); + if(d.length > max_size) { + throw new IllegalArgumentException( + "Cannot cache data larger than " + max_size + + " bytes (you tried to cache a " + + d.length + " byte object)"); + } + flags=f; + data=d; + } + + /** + * Get the stored data. + */ + public byte[] getData() { + return data; + } + + /** + * Get the flags stored along with this value. 
+ */ + public int getFlags() { + return flags; + } + + @Override + public String toString() { + return "{CachedData flags=" + flags + " data=" + + Arrays.toString(data) + "}"; + } +} diff --git a/src/main/java/net/spy/memcached/CollectionOperationException.java b/src/main/java/net/spy/memcached/CollectionOperationException.java new file mode 100644 index 000000000..c1d0ef844 --- /dev/null +++ b/src/main/java/net/spy/memcached/CollectionOperationException.java @@ -0,0 +1,30 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +public class CollectionOperationException extends RuntimeException { + + private static final long serialVersionUID = 8715298251738556350L; + + public CollectionOperationException(String message) { + super(message); + } + + public CollectionOperationException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/src/main/java/net/spy/memcached/ConnectionFactory.java b/src/main/java/net/spy/memcached/ConnectionFactory.java new file mode 100644 index 000000000..9064f4a99 --- /dev/null +++ b/src/main/java/net/spy/memcached/ConnectionFactory.java @@ -0,0 +1,197 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.BlockingQueue; + +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Factory for creating instances of MemcachedConnection. + * This is used to provide more fine-grained configuration of connections. + */ +public interface ConnectionFactory { + + /** + * Create a MemcachedConnection for the given SocketAddresses. + * + * @param addrs the addresses of the memcached servers + * @return a new MemcachedConnection connected to those addresses + * @throws IOException for problems initializing the memcached connections + */ + MemcachedConnection createConnection(List addrs) + throws IOException; + + /** + * Create a new memcached node. + */ + MemcachedNode createMemcachedNode(SocketAddress sa, + SocketChannel c, int bufSize); + + /** + * Create a BlockingQueue for operations for a connection. + */ + BlockingQueue createOperationQueue(); + + /** + * Create a BlockingQueue for the operations currently expecting to read + * responses from memcached. + */ + BlockingQueue createReadOperationQueue(); + + /** + * Create a BlockingQueue for the operations currently expecting to write + * requests to memcached. 
+ */ + BlockingQueue createWriteOperationQueue(); + + /** + * Get the maximum amount of time (in milliseconds) a client is willing + * to wait to add a new item to a queue. + */ + long getOpQueueMaxBlockTime(); + + /** + * Create a NodeLocator instance for the given list of nodes. + */ + NodeLocator createLocator(List nodes); + + /** + * Get the operation factory for connections built by this connection + * factory. + */ + OperationFactory getOperationFactory(); + + /** + * Get the operation timeout used by this connection. + */ + long getOperationTimeout(); + + /** + * If true, the IO thread should be a daemon thread. + */ + boolean isDaemon(); + + /** + * If true, the nagle algorithm will be used on connected sockets. + * + *

+ * See {@link java.net.Socket#setTcpNoDelay(boolean)} for more information. + *

+ */ + boolean useNagleAlgorithm(); + + /** + * Observers that should be established at the time of connection + * instantiation. + * + * These observers will see the first connection established. + */ + Collection getInitialObservers(); + + /** + * Get the default failure mode for the underlying connection. + */ + FailureMode getFailureMode(); + + /** + * Get the default transcoder to be used in connections created by this + * factory. + */ + Transcoder getDefaultTranscoder(); + + /** + * Get the default collection transcoder to be used in connections created by this + * factory. + */ + Transcoder getDefaultCollectionTranscoder(); + + /** + * If true, low-level optimization is in effect. + */ + boolean shouldOptimize(); + + /** + * Get the read buffer size set at construct time. + */ + int getReadBufSize(); + + /** + * Get the hash algorithm to be used. + */ + public HashAlgorithm getHashAlg(); + + /** + * Maximum number of milliseconds to wait between reconnect attempts. + */ + long getMaxReconnectDelay(); + + /** + * Authenticate connections using the given auth descriptor. + * + * @return null if no authentication should take place + */ + AuthDescriptor getAuthDescriptor(); + + /** + * Maximum number of timeout exception for shutdown connection + */ + int getTimeoutExceptionThreshold(); + + /** + * Set the maximum number of front cache elements. + */ + int getMaxFrontCacheElements(); + + /** + * Set front cache's expire time. 
+ */ + int getFrontCacheExpireTime(); + + /** + * Bulk service thread count + */ + int getBulkServiceThreadCount(); + + /** + * Bulk service loop limit + */ + int getBulkServiceLoopLimit(); + + /** + * Bulk service single operation timeout + */ + long getBulkServiceSingleOpTimeout(); + + /** + * get max smget key chunk size + */ + int getDefaultMaxSMGetKeyChunkSize(); + + /** + * get front cache name + */ + String getFrontCacheName(); +} diff --git a/src/main/java/net/spy/memcached/ConnectionFactoryBuilder.java b/src/main/java/net/spy/memcached/ConnectionFactoryBuilder.java new file mode 100644 index 000000000..fb56e8129 --- /dev/null +++ b/src/main/java/net/spy/memcached/ConnectionFactoryBuilder.java @@ -0,0 +1,505 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.BlockingQueue; + +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationQueueFactory; +import net.spy.memcached.protocol.ascii.AsciiOperationFactory; +import net.spy.memcached.protocol.binary.BinaryOperationFactory; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Builder for more easily configuring a ConnectionFactory. 
+ */ +public class ConnectionFactoryBuilder { + + private OperationQueueFactory opQueueFactory; + private OperationQueueFactory readQueueFactory; + private OperationQueueFactory writeQueueFactory; + + private Transcoder transcoder; + private Transcoder collectionTranscoder; + + private FailureMode failureMode = FailureMode.Cancel; + + private Collection initialObservers + = Collections.emptyList(); + + private OperationFactory opFact; + + private Locator locator = Locator.ARCUSCONSISTENT; + private long opTimeout = -1; + private boolean isDaemon = true; + private boolean shouldOptimize = false; + private boolean useNagle = false; +// private long maxReconnectDelay = +// DefaultConnectionFactory.DEFAULT_MAX_RECONNECT_DELAY; + private long maxReconnectDelay = 1; + + private int readBufSize = -1; + private HashAlgorithm hashAlg = HashAlgorithm.KETAMA_HASH; + private AuthDescriptor authDescriptor = null; + private long opQueueMaxBlockTime = -1; + +// private int timeoutExceptionThreshold = DefaultConnectionFactory.DEFAULT_MAX_TIMEOUTEXCEPTION_THRESHOLD; + private int timeoutExceptionThreshold = 10; + + private int maxFrontCacheElements = DefaultConnectionFactory.DEFAULT_MAX_FRONTCACHE_ELEMENTS; + private int frontCacheExpireTime = DefaultConnectionFactory.DEFAULT_FRONTCACHE_EXPIRETIME; + + private int bulkServiceThreadCount = DefaultConnectionFactory.DEFAULT_BULKSERVICE_THREAD_COUNT; + private int bulkServiceLoopLimit = DefaultConnectionFactory.DEFAULT_BULKSERVICE_LOOP_LIMIT; + private long bulkServiceSingleOpTimeout = DefaultConnectionFactory.DEFAULT_BULKSERVICE_SINGLE_OP_TIMEOUT; + + private int maxSMGetChunkSize = DefaultConnectionFactory.DEFAULT_MAX_SMGET_KEY_CHUNK_SIZE; + + private String frontCacheName = "ArcusFrontCache_" + this.hashCode(); + + /** + * Set the operation queue factory. + */ + public ConnectionFactoryBuilder setOpQueueFactory(OperationQueueFactory q) { + opQueueFactory = q; + return this; + } + + /** + * Set the read queue factory. 
+ */ + public ConnectionFactoryBuilder setReadOpQueueFactory(OperationQueueFactory q) { + readQueueFactory = q; + return this; + } + + /** + * Set the write queue factory. + */ + public ConnectionFactoryBuilder setWriteOpQueueFactory(OperationQueueFactory q) { + writeQueueFactory = q; + return this; + } + + /** + * Set the maximum amount of time (in milliseconds) a client is willing to + * wait for space to become available in an output queue. + */ + public ConnectionFactoryBuilder setOpQueueMaxBlockTime(long t) { + opQueueMaxBlockTime = t; + return this; + } + + /** + * Set the default transcoder. + */ + public ConnectionFactoryBuilder setTranscoder(Transcoder t) { + transcoder = t; + return this; + } + + /** + * Set the default collection transcoder. + */ + public ConnectionFactoryBuilder setCollectionTranscoder(Transcoder t) { + collectionTranscoder = t; + return this; + } + + /** + * Set the failure mode. + */ + public ConnectionFactoryBuilder setFailureMode(FailureMode fm) { + failureMode = fm; + return this; + } + + /** + * Set the initial connection observers (will observe initial connection). + */ + public ConnectionFactoryBuilder setInitialObservers( + Collection obs) { + initialObservers = obs; + return this; + } + + /** + * Set the operation factory. + * + * Note that the operation factory is used to also imply the type of + * nodes to create. + * + * @see MemcachedNode + */ + public ConnectionFactoryBuilder setOpFact(OperationFactory f) { + opFact = f; + return this; + } + + /** + * Set the default operation timeout in milliseconds. + */ + public ConnectionFactoryBuilder setOpTimeout(long t) { + opTimeout = t; + return this; + } + + /** + * Set the daemon state of the IO thread (defaults to true). + */ + public ConnectionFactoryBuilder setDaemon(boolean d) { + isDaemon = d; + return this; + } + + /** + * Set to false if the default operation optimization is not desirable. 
+ */ + public ConnectionFactoryBuilder setShouldOptimize(boolean o) { + shouldOptimize = o; + return this; + } + + /** + * Set the read buffer size. + */ + public ConnectionFactoryBuilder setReadBufferSize(int to) { + readBufSize = to; + return this; + } + + /** + * Set the hash algorithm. + */ + public ConnectionFactoryBuilder setHashAlg(HashAlgorithm to) { + hashAlg = to; + return this; + } + + /** + * Set to true if you'd like to enable the Nagle algorithm. + */ + public ConnectionFactoryBuilder setUseNagleAlgorithm(boolean to) { + useNagle = to; + return this; + } + + /** + * Convenience method to specify the protocol to use. + */ + public ConnectionFactoryBuilder setProtocol(Protocol prot) { + switch(prot) { + case TEXT: + opFact = new AsciiOperationFactory(); + break; + case BINARY: + opFact = new BinaryOperationFactory(); + break; + default: assert false : "Unhandled protocol: " + prot; + } + return this; + } + + /** + * Set the locator type. + */ + public ConnectionFactoryBuilder setLocatorType(Locator l) { + locator = l; + return this; + } + + /** + * Set the maximum reconnect delay. + */ + public ConnectionFactoryBuilder setMaxReconnectDelay(long to) { + assert to > 0 : "Reconnect delay must be a positive number"; + maxReconnectDelay = to; + return this; + } + + /** + * Set the auth descriptor to enable authentication on new connections. + */ + public ConnectionFactoryBuilder setAuthDescriptor(AuthDescriptor to) { + authDescriptor = to; + return this; + } + + /** + * Set the maximum timeout exception threshold + */ + public ConnectionFactoryBuilder setTimeoutExceptionThreshold(int to) { + assert to > 1 : "Minimum timeout exception threshold is 2"; + if (to > 1) { + timeoutExceptionThreshold = to -2; + } + return this; + } + + /** + * Set the maximum number of front cache elements. 
+ */ + public ConnectionFactoryBuilder setMaxFrontCacheElements(int to) { + assert to > 0 : "In case of front cache, the number must be a positive number"; + maxFrontCacheElements = to; + return this; + } + + /** + * Set front cache's expire time. + */ + public ConnectionFactoryBuilder setFrontCacheExpireTime(int to) { + assert to > 0 : "Front cache's expire time must be a positive number"; + frontCacheExpireTime = to; + return this; + } + + /** + * Set bulk service default thread count + */ + public ConnectionFactoryBuilder setBulkServiceThreadCount(int to) { + assert to > 0 : "Bulk service's thread count must be a positive number"; + bulkServiceThreadCount = to; + return this; + } + + /** + * Set bulk service loop limit count + */ + public ConnectionFactoryBuilder setBulkServiceLoopLimit(int to) { + assert to > 0 : "Bulk service's loop limit must be a positive number"; + bulkServiceLoopLimit = to; + return this; + } + + /** + * Set bulk service each operation timeout + */ + public ConnectionFactoryBuilder setBulkServiceSingleOpTimeout(long to) { + assert to > 0 : "Bulk service's single operation timeout must be a positive number"; + bulkServiceSingleOpTimeout = to; + return this; + } + + /** + * Set max smget key chunk size + */ + public ConnectionFactoryBuilder setMaxSMGetKeyChunkSize(int size) { + maxSMGetChunkSize = size; + return this; + } + + /** + * Get the ConnectionFactory set up with the provided parameters. + */ + public ConnectionFactory build() { + return new DefaultConnectionFactory() { + + @Override + public BlockingQueue createOperationQueue() { + return opQueueFactory == null ? + super.createOperationQueue() : opQueueFactory.create(); + } + + @Override + public BlockingQueue createReadOperationQueue() { + return readQueueFactory == null ? + super.createReadOperationQueue() + : readQueueFactory.create(); + } + + @Override + public BlockingQueue createWriteOperationQueue() { + return writeQueueFactory == null ? 
+ super.createReadOperationQueue() + : writeQueueFactory.create(); + } + + @Override + public NodeLocator createLocator(List nodes) { + switch(locator) { + case ARRAY_MOD: + return new ArrayModNodeLocator(nodes, getHashAlg()); + case CONSISTENT: + return new KetamaNodeLocator(nodes, getHashAlg()); + case ARCUSCONSISTENT: + return new ArcusKetamaNodeLocator(nodes, getHashAlg()); + default: throw new IllegalStateException( + "Unhandled locator type: " + locator); + } + } + + @Override + public Transcoder getDefaultTranscoder() { + return transcoder == null ? + super.getDefaultTranscoder() : transcoder; + } + + @Override + public Transcoder getDefaultCollectionTranscoder() { + return collectionTranscoder == null ? + super.getDefaultCollectionTranscoder() : collectionTranscoder; + } + + @Override + public FailureMode getFailureMode() { + return failureMode == null ? + super.getFailureMode() : failureMode; + } + + @Override + public HashAlgorithm getHashAlg() { + return hashAlg == null ? super.getHashAlg() : hashAlg; + } + + @Override + public Collection getInitialObservers() { + return initialObservers; + } + + @Override + public OperationFactory getOperationFactory() { + return opFact == null ? super.getOperationFactory() : opFact; + } + + @Override + public long getOperationTimeout() { + return opTimeout == -1 ? + super.getOperationTimeout() : opTimeout; + } + + @Override + public int getReadBufSize() { + return readBufSize == -1 ? + super.getReadBufSize() : readBufSize; + } + + @Override + public boolean isDaemon() { + return isDaemon; + } + + @Override + public boolean shouldOptimize() { + return shouldOptimize; + } + + @Override + public boolean useNagleAlgorithm() { + return useNagle; + } + + @Override + public long getMaxReconnectDelay() { + return maxReconnectDelay; + } + + @Override + public AuthDescriptor getAuthDescriptor() { + return authDescriptor; + } + + @Override + public long getOpQueueMaxBlockTime() { + return opQueueMaxBlockTime > -1 ? 
opQueueMaxBlockTime + : super.getOpQueueMaxBlockTime(); + } + + @Override + public int getTimeoutExceptionThreshold() { + return timeoutExceptionThreshold; + } + + @Override + public int getMaxFrontCacheElements() { + return maxFrontCacheElements; + } + + @Override + public int getFrontCacheExpireTime() { + return frontCacheExpireTime; + } + + @Override + public int getBulkServiceThreadCount() { + return bulkServiceThreadCount; + } + + @Override + public int getBulkServiceLoopLimit() { + return bulkServiceLoopLimit; + } + + @Override + public long getBulkServiceSingleOpTimeout() { + return bulkServiceSingleOpTimeout; + } + + @Override + public int getDefaultMaxSMGetKeyChunkSize() { + return maxSMGetChunkSize; + } + + @Override + public String getFrontCacheName() { + return frontCacheName; + } + }; + } + + /** + * Type of protocol to use for connections. + */ + public static enum Protocol { + /** + * Use the text (ascii) protocol. + */ + TEXT, + /** + * Use the binary protocol. + */ + BINARY + } + + /** + * Type of node locator to use. + */ + public static enum Locator { + /** + * Array modulus - the classic node location algorithm. + */ + ARRAY_MOD, + /** + * Fixed Consistent hash algorithm. + * + * This uses ketema's distribution algorithm, but may be used with any + * hash algorithm. + */ + CONSISTENT, + /** + * Live Consistent hash algorithm + * + * This uses ketama's distribution algorithm, and used with + * node change(add, delete) + */ + ARCUSCONSISTENT + } +} diff --git a/src/main/java/net/spy/memcached/ConnectionObserver.java b/src/main/java/net/spy/memcached/ConnectionObserver.java new file mode 100644 index 000000000..4fa1c4f53 --- /dev/null +++ b/src/main/java/net/spy/memcached/ConnectionObserver.java @@ -0,0 +1,27 @@ +package net.spy.memcached; + +import java.net.SocketAddress; + +/** + * Users of this interface will be notified when changes to the state of + * connections take place. 
+ */ +public interface ConnectionObserver { + + /** + * A connection has just successfully been established on the given socket. + * + * @param sa the address of the node whose connection was established + * @param reconnectCount the number of attempts before the connection was + * established + */ + void connectionEstablished(SocketAddress sa, int reconnectCount); + + + /** + * A connection was just lost on the given socket. + * + * @param sa the address of the node whose connection was lost + */ + void connectionLost(SocketAddress sa); +} diff --git a/src/main/java/net/spy/memcached/DefaultConnectionFactory.java b/src/main/java/net/spy/memcached/DefaultConnectionFactory.java new file mode 100644 index 000000000..b9cd4d9f2 --- /dev/null +++ b/src/main/java/net/spy/memcached/DefaultConnectionFactory.java @@ -0,0 +1,407 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.protocol.ascii.AsciiMemcachedNodeImpl; +import net.spy.memcached.protocol.ascii.AsciiOperationFactory; +import net.spy.memcached.protocol.binary.BinaryMemcachedNodeImpl; +import net.spy.memcached.protocol.binary.BinaryOperationFactory; +import net.spy.memcached.transcoders.CollectionTranscoder; +import net.spy.memcached.transcoders.SerializingTranscoder; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Default implementation of ConnectionFactory. + * + *

+ * This implementation creates connections where the operation queue is an + * ArrayBlockingQueue and the read and write queues are unbounded + * LinkedBlockingQueues. The Redistribute FailureMode is always + * used. If other FailureModes are needed, look at the + * ConnectionFactoryBuilder. + * + *

+ */ +public class DefaultConnectionFactory extends SpyObject + implements ConnectionFactory { + + /** + * Default failure mode. + */ + public static final FailureMode DEFAULT_FAILURE_MODE = + FailureMode.Redistribute; + + /** + * Default hash algorithm. + */ + public static final HashAlgorithm DEFAULT_HASH = HashAlgorithm.NATIVE_HASH; + + /** + * Maximum length of the operation queue returned by this connection + * factory. + */ + public static final int DEFAULT_OP_QUEUE_LEN=16384; + + /** + * The maximum time to block waiting for op queue operations to complete, + * in milliseconds. The default has been set with the expectation that + * most requests are interactive and waiting for more than a few seconds + * is thus more undesirable than failing the request. + */ + public static final long DEFAULT_OP_QUEUE_MAX_BLOCK_TIME = + TimeUnit.SECONDS.toMillis(10); + + /** + * The read buffer size for each server connection from this factory. + */ + public static final int DEFAULT_READ_BUFFER_SIZE=16384; + + /** + * Default operation timeout in milliseconds. + */ + public static final long DEFAULT_OPERATION_TIMEOUT = 1000; + + /** + * Maximum amount of time (in seconds) to wait between reconnect attempts. 
+ */ + public static final long DEFAULT_MAX_RECONNECT_DELAY = 30; + + /** + * Maximum number + 2 of timeout exception for shutdown connection + */ + public static final int DEFAULT_MAX_TIMEOUTEXCEPTION_THRESHOLD = 998; + + /** + * Maximum number of Front cache elements + */ + public static final int DEFAULT_MAX_FRONTCACHE_ELEMENTS = 0; + + /** + * Maximum number of Front cache elements + */ + public static final int DEFAULT_FRONTCACHE_EXPIRETIME = 5; + + /** + * Default bulk service thread count + */ + public static final int DEFAULT_BULKSERVICE_THREAD_COUNT = 1; + + /** + * Default bulk service loop limit + */ + public static final int DEFAULT_BULKSERVICE_LOOP_LIMIT = 1; + + /** + * Default bulk service single operation timeout + */ + public static final long DEFAULT_BULKSERVICE_SINGLE_OP_TIMEOUT = 1000L; + + /** + * Max smget key chunk size per request + */ + public static final int DEFAULT_MAX_SMGET_KEY_CHUNK_SIZE = 500; + + /** + * Default front cache name + */ + private static final String DEFAULT_FRONT_CACHE_NAME = "ArcusFrontCache" + new Object().hashCode(); + + private final int opQueueLen; + private final int readBufSize; + private final HashAlgorithm hashAlg; + + /** + * Construct a DefaultConnectionFactory with the given parameters. + * + * @param qLen the queue length. + * @param bufSize the buffer size + * @param hash the algorithm to use for hashing + */ + public DefaultConnectionFactory(int qLen, int bufSize, HashAlgorithm hash) { + super(); + opQueueLen=qLen; + readBufSize=bufSize; + hashAlg=hash; + } + + /** + * Create a DefaultConnectionFactory with the given maximum operation + * queue length, and the given read buffer size. + */ + public DefaultConnectionFactory(int qLen, int bufSize) { + this(qLen, bufSize, DEFAULT_HASH); + } + + /** + * Create a DefaultConnectionFactory with the default parameters. 
+ */ + public DefaultConnectionFactory() { + this(DEFAULT_OP_QUEUE_LEN, DEFAULT_READ_BUFFER_SIZE); + } + + public MemcachedNode createMemcachedNode(SocketAddress sa, + SocketChannel c, int bufSize) { + + OperationFactory of = getOperationFactory(); + if(of instanceof AsciiOperationFactory) { + return new AsciiMemcachedNodeImpl(sa, c, bufSize, + createReadOperationQueue(), + createWriteOperationQueue(), + createOperationQueue(), + getOpQueueMaxBlockTime()); + } else if(of instanceof BinaryOperationFactory) { + boolean doAuth = false; + if (getAuthDescriptor() != null) { + doAuth = true; + } + return new BinaryMemcachedNodeImpl(sa, c, bufSize, + createReadOperationQueue(), + createWriteOperationQueue(), + createOperationQueue(), + getOpQueueMaxBlockTime(), + doAuth); + } else { + throw new IllegalStateException( + "Unhandled operation factory type " + of); + } + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#createConnection(java.util.List) + */ + public MemcachedConnection createConnection(List addrs) + throws IOException { + return new MemcachedConnection(getReadBufSize(), this, addrs, + getInitialObservers(), getFailureMode(), getOperationFactory()); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getFailureMode() + */ + public FailureMode getFailureMode() { + return DEFAULT_FAILURE_MODE; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#createOperationQueue() + */ + public BlockingQueue createOperationQueue() { + return new ArrayBlockingQueue(getOpQueueLen()); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#createReadOperationQueue() + */ + public BlockingQueue createReadOperationQueue() { + return new LinkedBlockingQueue(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#createWriteOperationQueue() + */ + public BlockingQueue createWriteOperationQueue() { + return new LinkedBlockingQueue(); + } + + /* (non-Javadoc) + * @see 
net.spy.memcached.ConnectionFactory#createLocator(java.util.List) + */ + public NodeLocator createLocator(List nodes) { + return new ArrayModNodeLocator(nodes, getHashAlg()); + } + + /** + * Get the op queue length set at construct time. + */ + public int getOpQueueLen() { + return opQueueLen; + } + + /** + * @return the maximum time to block waiting for op queue operations to + * complete, in milliseconds, or null for no waiting. + */ + public long getOpQueueMaxBlockTime() { + return DEFAULT_OP_QUEUE_MAX_BLOCK_TIME; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getReadBufSize() + */ + public int getReadBufSize() { + return readBufSize; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getHashAlg() + */ + public HashAlgorithm getHashAlg() { + return hashAlg; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getOperationFactory() + */ + public OperationFactory getOperationFactory() { + return new AsciiOperationFactory(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getOperationTimeout() + */ + public long getOperationTimeout() { + return DEFAULT_OPERATION_TIMEOUT; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#isDaemon() + */ + public boolean isDaemon() { + return false; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getInitialObservers() + */ + public Collection getInitialObservers() { + return Collections.emptyList(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getDefaultTranscoder() + */ + public Transcoder getDefaultTranscoder() { + return new SerializingTranscoder(); + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getDefaultCollectionTranscoder() + */ + public Transcoder getDefaultCollectionTranscoder() { + return new CollectionTranscoder(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#useNagleAlgorithm() + */ + public boolean useNagleAlgorithm() { + 
return false; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#shouldOptimize() + */ + public boolean shouldOptimize() { + return true; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getMaxReconnectDelay() + */ + public long getMaxReconnectDelay() { + return DEFAULT_MAX_RECONNECT_DELAY; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getAuthDescriptor() + */ + public AuthDescriptor getAuthDescriptor() { + return null; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getTimeoutExceptionThreshold() + */ + public int getTimeoutExceptionThreshold() { + return DEFAULT_MAX_TIMEOUTEXCEPTION_THRESHOLD; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getMaxFrontCacheElements() + */ + public int getMaxFrontCacheElements() { + return DEFAULT_MAX_FRONTCACHE_ELEMENTS; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getFrontCacheExpireTime() + */ + public int getFrontCacheExpireTime() { + return DEFAULT_FRONTCACHE_EXPIRETIME; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getBulkServiceThreadCount() + */ + @Override + public int getBulkServiceThreadCount() { + return DEFAULT_BULKSERVICE_THREAD_COUNT; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getBulkServiceLoopLimit() + */ + @Override + public int getBulkServiceLoopLimit() { + return DEFAULT_BULKSERVICE_LOOP_LIMIT; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getBulkServiceSingleOpTimeout() + */ + @Override + public long getBulkServiceSingleOpTimeout() { + return DEFAULT_BULKSERVICE_SINGLE_OP_TIMEOUT; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getDefaultMaxSMGetKeyChunkSize() + */ + @Override + public int getDefaultMaxSMGetKeyChunkSize() { + return DEFAULT_MAX_SMGET_KEY_CHUNK_SIZE; + } + + /* + * (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#getFrontCacheName() + */ + 
/**
 * Strategies for coping with an unresponsive memcached node.
 */
public enum FailureMode {

    /**
     * Move on to functional nodes when nodes fail.
     *
     * <p>
     * In this failure mode, the failure of a node will cause its current
     * queue and future requests to move to the next logical node in the
     * cluster for a given key.
     * </p>
     */
    Redistribute,

    /**
     * Continue to retry a failing node until it comes back up.
     *
     * <p>
     * This failure mode is appropriate when you have a rare short downtime
     * of a memcached node that will be back quickly, and your app is written
     * to not wait very long for async command completion.
     * </p>
     */
    Retry,

    /**
     * Automatically cancel all operations heading towards a downed node.
     */
    Cancel
}
package net.spy.memcached;

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.zip.CRC32;

/**
 * Known hashing algorithms for locating a server for a key.
 * Note that all hash algorithms return 64-bits of hash, but only the lower
 * 32-bits are significant. This allows a positive 32-bit number to be
 * returned for all cases.
 */
public enum HashAlgorithm {

    /**
     * Native hash (String.hashCode()).
     */
    NATIVE_HASH,
    /**
     * CRC32_HASH as used by the perl API. This will be more consistent both
     * across multiple API users as well as java versions, but is most likely
     * significantly slower.
     */
    CRC32_HASH,
    /**
     * FNV hashes are designed to be fast while maintaining a low collision
     * rate. The FNV speed allows one to quickly hash lots of data while
     * maintaining a reasonable collision rate.
     */
    FNV1_64_HASH,
    /**
     * Variation of FNV (operand order swapped: xor first, then multiply).
     */
    FNV1A_64_HASH,
    /**
     * 32-bit FNV1.
     */
    FNV1_32_HASH,
    /**
     * 32-bit FNV1a.
     */
    FNV1A_32_HASH,
    /**
     * MD5-based hash algorithm used by ketama.
     */
    KETAMA_HASH;

    private static final long FNV_64_INIT = 0xcbf29ce484222325L;
    private static final long FNV_64_PRIME = 0x100000001b3L;

    private static final long FNV_32_INIT = 2166136261L;
    private static final long FNV_32_PRIME = 16777619;

    // Prototype digest; cloned per call so the shared instance is never
    // mutated concurrently.
    private static MessageDigest MD5_DIGEST = null;

    static {
        try {
            MD5_DIGEST = MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
            throw new RuntimeException("MD5 not supported", e);
        }
    }

    /**
     * Compute the hash for the given key.
     *
     * @param k the key to hash
     * @return a positive integer hash (only the low 32 bits are significant)
     */
    public long hash(final String k) {
        long rv = 0;
        switch (this) {
            case NATIVE_HASH:
                rv = k.hashCode();
                break;
            case CRC32_HASH:
                // Matches the perl API: (crc32(shift) >> 16) & 0x7fff
                CRC32 crc = new CRC32();
                crc.update(KeyUtil.getKeyBytes(k));
                rv = (crc.getValue() >> 16) & 0x7fff;
                break;
            case FNV1_64_HASH: {
                // Thanks to pierre@demartines.com for the pointer
                rv = FNV_64_INIT;
                for (int i = 0, len = k.length(); i < len; i++) {
                    rv *= FNV_64_PRIME;
                    rv ^= k.charAt(i);
                }
                break;
            }
            case FNV1A_64_HASH: {
                rv = FNV_64_INIT;
                for (int i = 0, len = k.length(); i < len; i++) {
                    rv ^= k.charAt(i);
                    rv *= FNV_64_PRIME;
                }
                break;
            }
            case FNV1_32_HASH: {
                rv = FNV_32_INIT;
                for (int i = 0, len = k.length(); i < len; i++) {
                    rv *= FNV_32_PRIME;
                    rv ^= k.charAt(i);
                }
                break;
            }
            case FNV1A_32_HASH: {
                rv = FNV_32_INIT;
                for (int i = 0, len = k.length(); i < len; i++) {
                    rv ^= k.charAt(i);
                    rv *= FNV_32_PRIME;
                }
                break;
            }
            case KETAMA_HASH:
                // Fold the first four MD5 bytes, little-endian, into a long.
                byte[] digest = computeMd5(k);
                rv = ((long) (digest[3] & 0xFF) << 24)
                    | ((long) (digest[2] & 0xFF) << 16)
                    | ((long) (digest[1] & 0xFF) << 8)
                    | (digest[0] & 0xFF);
                break;
            default:
                assert false;
        }
        return rv & 0xffffffffL; /* Truncate to 32-bits */
    }

    /**
     * Get the md5 of the given key.
     *
     * @param k the key to digest
     * @return the 16-byte MD5 digest of the key's bytes
     */
    public static byte[] computeMd5(String k) {
        MessageDigest md5;
        try {
            md5 = (MessageDigest) MD5_DIGEST.clone();
        } catch (CloneNotSupportedException e) {
            throw new RuntimeException("clone of MD5 not supported", e);
        }
        md5.update(KeyUtil.getKeyBytes(k));
        return md5.digest();
    }
}

+ * This implementation piggy-backs on the functionality of the + * DefaultConnectionFactory in terms of connections and queue + * handling. Where it differs is that it uses both the + * KetamaNodeLocator and the HashAlgorithm.KETAMA_HASH + * to provide consistent node hashing. + *

+ * + * @see RJ's blog post + */ +public class KetamaConnectionFactory extends DefaultConnectionFactory { + /** + * Create a KetamaConnectionFactory with the given maximum operation + * queue length, and the given read buffer size. + * + * @param opQueueMaxBlockTime the maximum time to block waiting for op + * queue operations to complete, in milliseconds + */ + public KetamaConnectionFactory(int qLen, int bufSize, + long opQueueMaxBlockTime) { + super(qLen, bufSize, HashAlgorithm.KETAMA_HASH); + } + + /** + * Create a KetamaConnectionFactory with the default parameters. + */ + public KetamaConnectionFactory() { + this(DEFAULT_OP_QUEUE_LEN, DEFAULT_READ_BUFFER_SIZE, + DEFAULT_OP_QUEUE_MAX_BLOCK_TIME); + } + + /* (non-Javadoc) + * @see net.spy.memcached.ConnectionFactory#createLocator(java.util.List) + */ + @Override + public NodeLocator createLocator(List nodes) { + return new KetamaNodeLocator(nodes, getHashAlg()); + } +} diff --git a/src/main/java/net/spy/memcached/KetamaNodeLocator.java b/src/main/java/net/spy/memcached/KetamaNodeLocator.java new file mode 100644 index 000000000..3c7f7ce8a --- /dev/null +++ b/src/main/java/net/spy/memcached/KetamaNodeLocator.java @@ -0,0 +1,191 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.util.DefaultKetamaNodeLocatorConfiguration; +import net.spy.memcached.util.KetamaNodeLocatorConfiguration; + +/** + * This is an implementation of the Ketama consistent hash strategy from + * last.fm. This implementation may not be compatible with libketama as + * hashing is considered separate from node location. + * + * Note that this implementation does not currently support weighted nodes. + * + * @see RJ's blog post + */ +public final class KetamaNodeLocator extends SpyObject implements NodeLocator { + + + final SortedMap ketamaNodes; + final Collection allNodes; + + final HashAlgorithm hashAlg; + final KetamaNodeLocatorConfiguration config; + + + public KetamaNodeLocator(List nodes, HashAlgorithm alg) { + this(nodes, alg, new DefaultKetamaNodeLocatorConfiguration()); + } + + public KetamaNodeLocator(List nodes, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) { + super(); + allNodes = nodes; + hashAlg = alg; + ketamaNodes=new TreeMap(); + config= conf; + + int numReps= config.getNodeRepetitions(); + for(MemcachedNode node : nodes) { + // Ketama does some special work with md5 where it reuses chunks. 
+ if(alg == HashAlgorithm.KETAMA_HASH) { + for(int i=0; i smn, + Collection an, HashAlgorithm alg, KetamaNodeLocatorConfiguration conf) { + super(); + ketamaNodes=smn; + allNodes=an; + hashAlg=alg; + config=conf; + } + + public Collection getAll() { + return allNodes; + } + + public MemcachedNode getPrimary(final String k) { + MemcachedNode rv=getNodeForKey(hashAlg.hash(k)); + assert rv != null : "Found no node for key " + k; + return rv; + } + + long getMaxKey() { + return ketamaNodes.lastKey(); + } + + MemcachedNode getNodeForKey(long hash) { + final MemcachedNode rv; + if(!ketamaNodes.containsKey(hash)) { + // Java 1.6 adds a ceilingKey method, but I'm still stuck in 1.5 + // in a lot of places, so I'm doing this myself. + SortedMap tailMap=ketamaNodes.tailMap(hash); + if(tailMap.isEmpty()) { + hash=ketamaNodes.firstKey(); + } else { + hash=tailMap.firstKey(); + } + } + rv=ketamaNodes.get(hash); + return rv; + } + + public Iterator getSequence(String k) { + return new KetamaIterator(k, allNodes.size()); + } + + public NodeLocator getReadonlyCopy() { + SortedMap smn=new TreeMap( + ketamaNodes); + Collection an= + new ArrayList(allNodes.size()); + + // Rewrite the values a copy of the map. + for(Map.Entry me : smn.entrySet()) { + me.setValue(new MemcachedNodeROImpl(me.getValue())); + } + // Copy the allNodes collection. 
+ for(MemcachedNode n : allNodes) { + an.add(new MemcachedNodeROImpl(n)); + } + + return new KetamaNodeLocator(smn, an, hashAlg, config); + } + + public void update(Collection toAttach, Collection toDelete) { + throw new UnsupportedOperationException("update not supported"); + } + + class KetamaIterator implements Iterator { + + final String key; + long hashVal; + int remainingTries; + int numTries=0; + + public KetamaIterator(final String k, final int t) { + super(); + hashVal=hashAlg.hash(k); + remainingTries=t; + key=k; + } + + private void nextHash() { + // this.calculateHash(Integer.toString(tries)+key).hashCode(); + long tmpKey=hashAlg.hash((numTries++) + key); + // This echos the implementation of Long.hashCode() + hashVal += (int)(tmpKey ^ (tmpKey >>> 32)); + hashVal &= 0xffffffffL; /* truncate to 32-bits */ + remainingTries--; + } + + public boolean hasNext() { + return remainingTries > 0; + } + + public MemcachedNode next() { + try { + return getNodeForKey(hashVal); + } finally { + nextHash(); + } + } + + public void remove() { + throw new UnsupportedOperationException("remove not supported"); + } + + } +} diff --git a/src/main/java/net/spy/memcached/KeyUtil.java b/src/main/java/net/spy/memcached/KeyUtil.java new file mode 100644 index 000000000..51537bc51 --- /dev/null +++ b/src/main/java/net/spy/memcached/KeyUtil.java @@ -0,0 +1,39 @@ +package net.spy.memcached; + +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.Collection; + +/** + * Utilities for processing key values. + */ +public class KeyUtil { + + /** + * Get the bytes for a key. + * + * @param k the key + * @return the bytes + */ + public static byte[] getKeyBytes(String k) { + try { + return k.getBytes("UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + /** + * Get the keys in byte form for all of the string keys. 
+ * + * @param keys a collection of keys + * @return return a collection of the byte representations of keys + */ + public static Collection getKeyBytes(Collection keys) { + Collection rv=new ArrayList(keys.size()); + for(String s : keys) { + rv.add(getKeyBytes(s)); + } + return rv; + } +} diff --git a/src/main/java/net/spy/memcached/MemcachedClient.java b/src/main/java/net/spy/memcached/MemcachedClient.java new file mode 100644 index 000000000..a267d289a --- /dev/null +++ b/src/main/java/net/spy/memcached/MemcachedClient.java @@ -0,0 +1,1874 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedSelectorException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.auth.AuthThreadMonitor; +import net.spy.memcached.compat.SpyThread; +import net.spy.memcached.internal.BulkFuture; +import net.spy.memcached.internal.BulkGetFuture; +import net.spy.memcached.internal.GetFuture; +import net.spy.memcached.internal.OperationFuture; +import net.spy.memcached.internal.SingleElementInfiniteIterator; +import net.spy.memcached.ops.CASOperationStatus; +import net.spy.memcached.ops.CancelledOperationStatus; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StatsOperation; +import net.spy.memcached.ops.StoreType; +import 
net.spy.memcached.plugin.LocalCacheManager; +import net.spy.memcached.transcoders.TranscodeService; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Client to a memcached server. + * + *

Basic usage

+ * + *
+ *	MemcachedClient c=new MemcachedClient(
+ *		new InetSocketAddress("hostname", portNum));
+ *
+ *	// Store a value (async) for one hour
+ *	c.set("someKey", 3600, someObject);
+ *	// Retrieve a value.
+ *	Object myObject=c.get("someKey");
+ *	
+ * + *

Advanced Usage

+ * + *

+ * MemcachedClient may be processing a great deal of asynchronous messages or + * possibly dealing with an unreachable memcached, which may delay processing. + * If a memcached is disabled, for example, MemcachedConnection will continue + * to attempt to reconnect and replay pending operations until it comes back + * up. To prevent this from causing your application to hang, you can use + * one of the asynchronous mechanisms to time out a request and cancel the + * operation to the server. + *

+ * + *
+ *      // Get a memcached client connected to several servers
+ *      // over the binary protocol
+ *      MemcachedClient c = new MemcachedClient(new BinaryConnectionFactory(),
+ *              AddrUtil.getAddresses("server1:11211 server2:11211"));
+ *
+ *      // Try to get a value, for up to 5 seconds, and cancel if it
+ *      // doesn't return
+ *      Object myObj = null;
+ *      Future<Object> f = c.asyncGet("someKey");
+ *      try {
+ *          myObj = f.get(5, TimeUnit.SECONDS);
+ *      // throws expecting InterruptedException, ExecutionException
+ *      // or TimeoutException
+ *      } catch (Exception e) {  /*  /
+ *          // Since we don't need this, go ahead and cancel the operation.
+ *          // This is not strictly necessary, but it'll save some work on
+ *          // the server.  It is okay to cancel it if running.
+ *          f.cancel(true);
+ *          // Do other timeout related stuff
+ *      }
+ * 
+ */ +public class MemcachedClient extends SpyThread + implements MemcachedClientIF, ConnectionObserver { + + private volatile boolean running=true; + private volatile boolean shuttingDown=false; + public LocalCacheManager localCacheManager = null; + + protected final long operationTimeout; + + private final MemcachedConnection conn; + protected final OperationFactory opFact; + + protected final Transcoder transcoder; + + final TranscodeService tcService; + + final AuthDescriptor authDescriptor; + + private final AuthThreadMonitor authMonitor = new AuthThreadMonitor(); + + /** + * Get a memcache client operating on the specified memcached locations. + * + * @param ia the memcached locations + * @throws IOException if connections cannot be established + */ + public MemcachedClient(InetSocketAddress... ia) throws IOException { + this(new DefaultConnectionFactory(), Arrays.asList(ia)); + } + + /** + * Get a memcache client over the specified memcached locations. + * + * @param addrs the socket addrs + * @throws IOException if connections cannot be established + */ + public MemcachedClient(List addrs) + throws IOException { + this(new DefaultConnectionFactory(), addrs); + } + + /** + * Get a memcache client over the specified memcached locations. 
+ * + * @param cf the connection factory to configure connections for this client + * @param addrs the socket addresses + * @throws IOException if connections cannot be established + */ + public MemcachedClient(ConnectionFactory cf, List addrs) + throws IOException { + if(cf == null) { + throw new NullPointerException("Connection factory required"); + } + if(addrs == null) { + throw new NullPointerException("Server list required"); + } + if(addrs.isEmpty()) { + throw new IllegalArgumentException( + "You must have at least one server to connect to"); + } + if(cf.getOperationTimeout() <= 0) { + throw new IllegalArgumentException( + "Operation timeout must be positive."); + } + tcService = new TranscodeService(cf.isDaemon()); + transcoder=cf.getDefaultTranscoder(); + opFact=cf.getOperationFactory(); + assert opFact != null : "Connection factory failed to make op factory"; + conn=cf.createConnection(addrs); + assert conn != null : "Connection factory failed to make a connection"; + operationTimeout = cf.getOperationTimeout(); + authDescriptor = cf.getAuthDescriptor(); + if(authDescriptor != null) { + addObserver(this); + } + setName("Memcached IO over " + conn); + setDaemon(cf.isDaemon()); + start(); + } + + /** + * Get the addresses of available servers. + * + *

+ * This is based on a snapshot in time so shouldn't be considered + * completely accurate, but is useful for getting a feel for what's + * working and what's not working. + *

+ * + * @return point-in-time view of currently available servers + */ + public Collection getAvailableServers() { + ArrayList rv=new ArrayList(); + for(MemcachedNode node : conn.getLocator().getAll()) { + if(node.isActive()) { + rv.add(node.getSocketAddress()); + } + } + return rv; + } + + /** + * Get the addresses of unavailable servers. + * + *

+ * This is based on a snapshot in time so shouldn't be considered + * completely accurate, but is useful for getting a feel for what's + * working and what's not working. + *

+ * + * @return point-in-time view of currently available servers + */ + public Collection getUnavailableServers() { + ArrayList rv=new ArrayList(); + for(MemcachedNode node : conn.getLocator().getAll()) { + if(!node.isActive()) { + rv.add(node.getSocketAddress()); + } + } + return rv; + } + + /** + * Get a read-only wrapper around the node locator wrapping this instance. + * + * @return this instance's NodeLocator + */ + public NodeLocator getNodeLocator() { + return conn.getLocator().getReadonlyCopy(); + } + + /** + * Get the default transcoder that's in use. + * + * @return this instance's Transcoder + */ + public Transcoder getTranscoder() { + return transcoder; + } + + protected void validateKey(String key) { + byte[] keyBytes=KeyUtil.getKeyBytes(key); + if(keyBytes.length > MAX_KEY_LENGTH) { + throw new IllegalArgumentException("Key is too long (maxlen = " + + MAX_KEY_LENGTH + ")"); + } + if(keyBytes.length == 0) { + throw new IllegalArgumentException( + "Key must contain at least one character."); + } + // Validate the key + for(byte b : keyBytes) { + if(b == ' ' || b == '\n' || b == '\r' || b == 0) { + throw new IllegalArgumentException( + "Key contains invalid characters: ``" + key + "''"); + } + } + } + + protected void checkState() { + if(shuttingDown) { + throw new IllegalStateException("Shutting down"); + } + assert isAlive() : "IO Thread is not running."; + } + + /** + * (internal use) Add a raw operation to a numbered connection. + * This method is exposed for testing. 
+ * + * @param which server number + * @param op the operation to perform + * @return the Operation + */ + protected Operation addOp(final String key, final Operation op) { + validateKey(key); + checkState(); + conn.addOperation(key, op); + return op; + } + + protected CountDownLatch broadcastOp(final BroadcastOpFactory of) { + return broadcastOp(of, conn.getLocator().getAll(), true); + } + + CountDownLatch broadcastOp(final BroadcastOpFactory of, + Collection nodes) { + return broadcastOp(of, nodes, true); + } + + private CountDownLatch broadcastOp(BroadcastOpFactory of, + Collection nodes, + boolean checkShuttingDown) { + if(checkShuttingDown && shuttingDown) { + throw new IllegalStateException("Shutting down"); + } + return conn.broadcastOperation(of, nodes); + } + + private Future asyncStore(StoreType storeType, String key, + int exp, T value, Transcoder tc) { + CachedData co=tc.encode(value); + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture rv=new OperationFuture(latch, + operationTimeout); + Operation op=opFact.store(storeType, key, co.getFlags(), + exp, co.getData(), new OperationCallback() { + public void receivedStatus(OperationStatus val) { + rv.set(val.isSuccess()); + } + public void complete() { + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + private Future asyncStore(StoreType storeType, + String key, int exp, Object value) { + return asyncStore(storeType, key, exp, value, transcoder); + } + + private Future asyncCat( + ConcatenationType catType, long cas, String key, + T value, Transcoder tc) { + CachedData co=tc.encode(value); + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture rv=new OperationFuture(latch, + operationTimeout); + Operation op=opFact.cat(catType, cas, key, co.getData(), + new OperationCallback() { + public void receivedStatus(OperationStatus val) { + rv.set(val.isSuccess()); + } + public void complete() { + latch.countDown(); + }}); + 
rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Append to an existing value in the cache. + * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + * @param cas cas identifier (ignored in the ascii protocol) + * @param key the key to whose value will be appended + * @param val the value to append + * @return a future indicating success, false if there was no change + * to the value + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future append(long cas, String key, Object val) { + return append(cas, key, val, transcoder); + } + + /** + * Append to an existing value in the cache. + * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + * @param + * @param cas cas identifier (ignored in the ascii protocol) + * @param key the key to whose value will be appended + * @param val the value to append + * @param tc the transcoder to serialize and unserialize the value + * @return a future indicating success + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future append(long cas, String key, T val, + Transcoder tc) { + return asyncCat(ConcatenationType.append, cas, key, val, tc); + } + + /** + * Prepend to an existing value in the cache. + * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + * @param cas cas identifier (ignored in the ascii protocol) + * @param key the key to whose value will be prepended + * @param val the value to append + * @return a future indicating success + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future prepend(long cas, String key, Object val) { + return prepend(cas, key, val, transcoder); + } + + /** + * Prepend to an existing value in the cache. + * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + * @param + * @param cas cas identifier (ignored in the ascii protocol) + * @param key the key to whose value will be prepended + * @param val the value to append + * @param tc the transcoder to serialize and unserialize the value + * @return a future indicating success + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future prepend(long cas, String key, T val, + Transcoder tc) { + return asyncCat(ConcatenationType.prepend, cas, key, val, tc); + } + + /** + * Asynchronous CAS operation. + * + * @param + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param value the new value + * @param tc the transcoder to serialize and unserialize the value + * @return a future that will indicate the status of the CAS + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncCAS(String key, long casId, T value, + Transcoder tc) { + return asyncCAS(key, casId, 0, value, tc); + } + + /** + * Asynchronous CAS operation. 
+ * + * @param + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param exp the expiration of this object + * @param value the new value + * @param tc the transcoder to serialize and unserialize the value + * @return a future that will indicate the status of the CAS + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncCAS(String key, long casId, int exp, T value, + Transcoder tc) { + CachedData co=tc.encode(value); + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture rv=new OperationFuture( + latch, operationTimeout); + Operation op=opFact.cas(StoreType.set, key, casId, co.getFlags(), exp, + co.getData(), new OperationCallback() { + public void receivedStatus(OperationStatus val) { + if(val instanceof CASOperationStatus) { + rv.set(((CASOperationStatus)val).getCASResponse()); + } else if(val instanceof CancelledOperationStatus) { + // Cancelled, ignore and let it float up + } else { + throw new RuntimeException( + "Unhandled state: " + val); + } + } + public void complete() { + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Asynchronous CAS operation using the default transcoder. + * + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param value the new value + * @return a future that will indicate the status of the CAS + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncCAS(String key, long casId, Object value) { + return asyncCAS(key, casId, value, transcoder); + } + + /** + * Perform a synchronous CAS operation. 
+ * + * @param + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param value the new value + * @param tc the transcoder to serialize and unserialize the value + * @return a CASResponse + * @throws OperationTimeoutException if global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public CASResponse cas(String key, long casId, T value, + Transcoder tc) { + return cas(key, casId, 0, value, tc); + } + + /** + * Perform a synchronous CAS operation. + * + * @param + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param exp the expiration of this object + * @param value the new value + * @param tc the transcoder to serialize and unserialize the value + * @return a CASResponse + * @throws OperationTimeoutException if global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public CASResponse cas(String key, long casId, int exp, T value, + Transcoder tc) { + try { + return asyncCAS(key, casId, exp, value, tc).get(operationTimeout, + TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for value", e); + } catch (ExecutionException e) { + throw new RuntimeException("Exception waiting for value", e); + } catch (TimeoutException e) { + throw new OperationTimeoutException("Timeout waiting for value", e); + } + } + + /** + * Perform a synchronous CAS operation with the default transcoder. 
+ * + * @param key the key + * @param casId the CAS identifier (from a gets operation) + * @param value the new value + * @return a CASResponse + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public CASResponse cas(String key, long casId, Object value) { + return cas(key, casId, value, transcoder); + } + + /** + * Add an object to the cache iff it does not exist already. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @param tc the transcoder to serialize and unserialize the value + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future add(String key, int exp, T o, Transcoder tc) { + return asyncStore(StoreType.add, key, exp, o, tc); + } + + /** + * Add an object to the cache (using the default transcoder) + * iff it does not exist already. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future add(String key, int exp, Object o) { + return asyncStore(StoreType.add, key, exp, o, transcoder); + } + + /** + * Set an object in the cache regardless of any existing value. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @param tc the transcoder to serialize and unserialize the value + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future set(String key, int exp, T o, Transcoder tc) { + return asyncStore(StoreType.set, key, exp, o, tc); + } + + /** + * Set an object in the cache (using the default transcoder) + * regardless of any existing value. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future set(String key, int exp, Object o) { + return asyncStore(StoreType.set, key, exp, o, transcoder); + } + + /** + * Replace an object with the given value iff there is already a value + * for the given key. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @param tc the transcoder to serialize and unserialize the value + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future replace(String key, int exp, T o, + Transcoder tc) { + return asyncStore(StoreType.replace, key, exp, o, tc); + } + + /** + * Replace an object with the given value (transcoded with the default + * transcoder) iff there is already a value for the given key. + * + *

+ * The exp value is passed along to memcached exactly as + * given, and will be processed per the memcached protocol specification: + *

+ * + *

Note that the return will be false any time a mutation has not + * occurred.

+ * + *
+ *

+ * The actual value sent may either be + * Unix time (number of seconds since January 1, 1970, as a 32-bit + * value), or a number of seconds starting from current time. In the + * latter case, this number of seconds may not exceed 60*60*24*30 (number + * of seconds in 30 days); if the number sent by a client is larger than + * that, the server will consider it to be real Unix time value rather + * than an offset from current time. + *

+ *
+ * + * @param key the key under which this object should be added. + * @param exp the expiration of this object + * @param o the object to store + * @return a future representing the processing of this operation + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future replace(String key, int exp, Object o) { + return asyncStore(StoreType.replace, key, exp, o, transcoder); + } + + /** + * Get the given key asynchronously. + * + * @param + * @param key the key to fetch + * @param tc the transcoder to serialize and unserialize value + * @return a future that will hold the return value of the fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncGet(final String key, final Transcoder tc) { + + final CountDownLatch latch=new CountDownLatch(1); + final GetFuture rv=new GetFuture(latch, operationTimeout); + + Operation op=opFact.get(key, + new GetOperation.Callback() { + private Future val=null; + public void receivedStatus(OperationStatus status) { + rv.set(val); + } + public void gotData(String k, int flags, byte[] data) { + assert key.equals(k) : "Wrong key returned"; + val=tcService.decode(tc, + new CachedData(flags, data, tc.getMaxSize())); + } + public void complete() { + // FIXME weird... + if (localCacheManager != null) { + localCacheManager.put(key, val, operationTimeout); + } + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Get the given key asynchronously and decode with the default + * transcoder. 
+ * + * @param key the key to fetch + * @return a future that will hold the return value of the fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncGet(final String key) { + return asyncGet(key, transcoder); + } + + /** + * Gets (with CAS support) the given key asynchronously. + * + * @param + * @param key the key to fetch + * @param tc the transcoder to serialize and unserialize value + * @return a future that will hold the return value of the fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future> asyncGets(final String key, + final Transcoder tc) { + + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture> rv= + new OperationFuture>(latch, operationTimeout); + + Operation op=opFact.gets(key, + new GetsOperation.Callback() { + private CASValue val=null; + public void receivedStatus(OperationStatus status) { + rv.set(val); + } + public void gotData(String k, int flags, long cas, byte[] data) { + assert key.equals(k) : "Wrong key returned"; + assert cas > 0 : "CAS was less than zero: " + cas; + val=new CASValue(cas, tc.decode( + new CachedData(flags, data, tc.getMaxSize()))); + } + public void complete() { + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Gets (with CAS support) the given key asynchronously and decode using + * the default transcoder. + * + * @param key the key to fetch + * @return a future that will hold the return value of the fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future> asyncGets(final String key) { + return asyncGets(key, transcoder); + } + + /** + * Gets (with CAS support) with a single key. 
+ * + * @param + * @param key the key to get + * @param tc the transcoder to serialize and unserialize value + * @return the result from the cache and CAS id (null if there is none) + * @throws OperationTimeoutException if global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public CASValue gets(String key, Transcoder tc) { + try { + return asyncGets(key, tc).get( + operationTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for value", e); + } catch (ExecutionException e) { + throw new RuntimeException("Exception waiting for value", e); + } catch (TimeoutException e) { + throw new OperationTimeoutException("Timeout waiting for value", e); + } + } + + /** + * Gets (with CAS support) with a single key using the default transcoder. + * + * @param key the key to get + * @return the result from the cache and CAS id (null if there is none) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public CASValue gets(String key) { + return gets(key, transcoder); + } + + /** + * Get with a single key. 
+ * + * @param + * @param key the key to get + * @param tc the transcoder to serialize and unserialize value + * @return the result from the cache (null if there is none) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public T get(String key, Transcoder tc) { + Future future = asyncGet(key, tc); + try { + return future.get( + operationTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + future.cancel(true); + throw new RuntimeException("Interrupted waiting for value", e); + } catch (ExecutionException e) { + future.cancel(true); + throw new RuntimeException("Exception waiting for value", e); + } catch (TimeoutException e) { + future.cancel(true); + throw new OperationTimeoutException("Timeout waiting for value", e); + } + } + + /** + * Get with a single key and decode using the default transcoder. + * + * @param key the key to get + * @return the result from the cache (null if there is none) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Object get(String key) { + return get(key, transcoder); + } + + /** + * Asynchronously get a bunch of objects from the cache. + * + * @param + * @param keys the keys to request + * @param tc_iter an iterator of transcoders to serialize and + * unserialize values; the transcoders are matched with + * the keys in the same order. 
The minimum of the key + * collection length and number of transcoders is used + * and no exception is thrown if they do not match + * @return a Future result of that fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public BulkFuture> asyncGetBulk(Collection keys, + Iterator> tc_iter) { + final Map> m=new ConcurrentHashMap>(); + + // This map does not need to be a ConcurrentHashMap + // because it is fully populated when it is used and + // used only to read the transcoder for a key. + final Map> tc_map = new HashMap>(); + + // Break the gets down into groups by key + final Map> chunks + =new HashMap>(); + final NodeLocator locator=conn.getLocator(); + Iterator key_iter=keys.iterator(); + while (key_iter.hasNext() && tc_iter.hasNext()) { + String key=key_iter.next(); + Transcoder tc = tc_iter.next(); + + // FIXME This should be refactored... + // And the original front-cache implementations are really weird :-( + if (localCacheManager != null) { + T cachedData = localCacheManager.get(key, tc); + if (cachedData != null) { + m.put(key, localCacheManager.asyncPreFetch(key, tc)); + continue; + } + } + + tc_map.put(key, tc); + validateKey(key); + final MemcachedNode primaryNode=locator.getPrimary(key); + MemcachedNode node=null; + if(primaryNode.isActive()) { + node=primaryNode; + } else { + for(Iterator i=locator.getSequence(key); + node == null && i.hasNext();) { + MemcachedNode n=i.next(); + if(n.isActive()) { + node=n; + } + } + if(node == null) { + node=primaryNode; + } + } + assert node != null : "Didn't find a node for " + key; + Collection ks=chunks.get(node); + if(ks == null) { + ks=new ArrayList(); + chunks.put(node, ks); + } + ks.add(key); + } + + final CountDownLatch latch=new CountDownLatch(chunks.size()); + final Collection ops=new ArrayList(chunks.size()); + + GetOperation.Callback cb=new GetOperation.Callback() { + @SuppressWarnings("synthetic-access") + public void 
receivedStatus(OperationStatus status) { + if(!status.isSuccess()) { + getLogger().warn("Unsuccessful get: %s", status); + } + } + public void gotData(String k, int flags, byte[] data) { + Transcoder tc = tc_map.get(k); + m.put(k, tcService.decode(tc, + new CachedData(flags, data, tc.getMaxSize()))); + } + public void complete() { + latch.countDown(); + } + }; + + // Now that we know how many servers it breaks down into, and the latch + // is all set up, convert all of these strings collections to operations + final Map mops= + new HashMap(); + + for(Map.Entry> me + : chunks.entrySet()) { + Operation op=opFact.get(me.getValue(), cb); + mops.put(me.getKey(), op); + ops.add(op); + } + assert mops.size() == chunks.size(); + checkState(); + conn.addOperations(mops); + return new BulkGetFuture(m, ops, latch, localCacheManager); + } + + /** + * Asynchronously get a bunch of objects from the cache. + * + * @param + * @param keys the keys to request + * @param tc the transcoder to serialize and unserialize values + * @return a Future result of that fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public BulkFuture> asyncGetBulk(Collection keys, + Transcoder tc) { + return asyncGetBulk(keys, new SingleElementInfiniteIterator(tc)); + } + + /** + * Asynchronously get a bunch of objects from the cache and decode them + * with the given transcoder. + * + * @param keys the keys to request + * @return a Future result of that fetch + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public BulkFuture> asyncGetBulk(Collection keys) { + return asyncGetBulk(keys, transcoder); + } + + /** + * Varargs wrapper for asynchronous bulk gets. 
+ * + * @param + * @param tc the transcoder to serialize and unserialize value + * @param keys one more more keys to get + * @return the future values of those keys + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public BulkFuture> asyncGetBulk(Transcoder tc, + String... keys) { + return asyncGetBulk(Arrays.asList(keys), tc); + } + + /** + * Varargs wrapper for asynchronous bulk gets with the default transcoder. + * + * @param keys one more more keys to get + * @return the future values of those keys + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public BulkFuture> asyncGetBulk(String... keys) { + return asyncGetBulk(Arrays.asList(keys), transcoder); + } + + /** + * Get the values for multiple keys from the cache. + * + * @param + * @param keys the keys + * @param tc the transcoder to serialize and unserialize value + * @return a map of the values (for each value that exists) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map getBulk(Collection keys, + Transcoder tc) { + try { + return asyncGetBulk(keys, tc).get( + operationTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted getting bulk values", e); + } catch (ExecutionException e) { + throw new RuntimeException("Failed getting bulk values", e); + } catch (TimeoutException e) { + throw new OperationTimeoutException( + "Timeout waiting for bulkvalues", e); + } + } + + /** + * Get the values for multiple keys from the cache. 
+ * + * @param keys the keys + * @return a map of the values (for each value that exists) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map getBulk(Collection keys) { + return getBulk(keys, transcoder); + } + + /** + * Get the values for multiple keys from the cache. + * + * @param + * @param tc the transcoder to serialize and unserialize value + * @param keys the keys + * @return a map of the values (for each value that exists) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map getBulk(Transcoder tc, String... keys) { + return getBulk(Arrays.asList(keys), tc); + } + + /** + * Get the values for multiple keys from the cache. + * + * @param keys the keys + * @return a map of the values (for each value that exists) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map getBulk(String... keys) { + return getBulk(Arrays.asList(keys), transcoder); + } + + /** + * Get the versions of all of the connected memcacheds. 
+ * + * @return a Map of SocketAddress to String for connected servers + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map getVersions() { + final Maprv= + new ConcurrentHashMap(); + + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory(){ + public Operation newOp(final MemcachedNode n, + final CountDownLatch latch) { + final SocketAddress sa=n.getSocketAddress(); + return opFact.version( + new OperationCallback() { + public void receivedStatus(OperationStatus s) { + rv.put(sa, s.getMessage()); + } + public void complete() { + latch.countDown(); + } + }); + }}); + try { + blatch.await(operationTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for versions", e); + } + return rv; + } + + /** + * Get all of the stats from all of the connections. + * + * @return a Map of a Map of stats replies by SocketAddress + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map> getStats() { + return getStats(null); + } + + /** + * Get a set of stats from all connections. + * + * @param arg which stats to get + * @return a Map of the server SocketAddress to a map of String stat + * keys to String stat values. 
+ * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Map> getStats(final String arg) { + final Map> rv + =new HashMap>(); + + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory(){ + public Operation newOp(final MemcachedNode n, + final CountDownLatch latch) { + final SocketAddress sa=n.getSocketAddress(); + rv.put(sa, new HashMap()); + return opFact.stats(arg, + new StatsOperation.Callback() { + public void gotStat(String name, String val) { + rv.get(sa).put(name, val); + } + @SuppressWarnings("synthetic-access") // getLogger() + public void receivedStatus(OperationStatus status) { + if(!status.isSuccess()) { + getLogger().warn("Unsuccessful stat fetch: %s", + status); + } + } + public void complete() { + latch.countDown(); + }}); + }}); + try { + blatch.await(operationTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for stats", e); + } + return rv; + } + + private long mutate(Mutator m, String key, int by, long def, int exp) { + final AtomicLong rv=new AtomicLong(); + final CountDownLatch latch=new CountDownLatch(1); + addOp(key, opFact.mutate(m, key, by, def, exp, new OperationCallback() { + public void receivedStatus(OperationStatus s) { + // XXX: Potential abstraction leak. + // The handling of incr/decr in the binary protocol + // Allows us to avoid string processing. + rv.set(new Long(s.isSuccess()?s.getMessage():"-1")); + } + public void complete() { + latch.countDown(); + }})); + try { + if (!latch.await(operationTimeout, TimeUnit.MILLISECONDS)) { + throw new OperationTimeoutException( + "Mutate operation timed out, unable to modify counter [" + + key + "]"); + } + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted", e); + } + getLogger().debug("Mutation returned %s", rv); + return rv.get(); + } + + /** + * Increment the given key by the given amount. 
+ * + * Due to the way the memcached server operates on items, incremented + * and decremented items will be returned as Strings with any + * operations that return a value. + * + * @param key the key + * @param by the amount to increment + * @return the new value (-1 if the key doesn't exist) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long incr(String key, int by) { + return mutate(Mutator.incr, key, by, -1, 0); + } + + /** + * Decrement the given key by the given value. + * + * Due to the way the memcached server operates on items, incremented + * and decremented items will be returned as Strings with any + * operations that return a value. + * + * @param key the key + * @param by the value + * @return the new value (-1 if the key doesn't exist) + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long decr(String key, int by) { + return mutate(Mutator.decr, key, by, -1, 0); + } + + /** + * Increment the given counter, returning the new value. + * + * Due to the way the memcached server operates on items, incremented + * and decremented items will be returned as Strings with any + * operations that return a value. 
+ * + * @param key the key + * @param by the amount to increment + * @param def the default value (if the counter does not exist) + * @param exp the expiration of this object + * @return the new value, or -1 if we were unable to increment or add + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long incr(String key, int by, long def, int exp) { + return mutate(Mutator.incr, key, by, def, exp); + } + + /** + * Decrement the given counter, returning the new value. + * + * Due to the way the memcached server operates on items, incremented + * and decremented items will be returned as Strings with any + * operations that return a value. + * + * @param key the key + * @param by the amount to decrement + * @param def the default value (if the counter does not exist) + * @param exp the expiration of this object + * @return the new value, or -1 if we were unable to decrement or add + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long decr(String key, int by, long def, int exp) { + return mutate(Mutator.decr, key, by, def, exp); + } + + + private long mutateWithDefault(Mutator t, String key, + int by, long def, int exp) { + long rv=mutate(t, key, by, def, exp); + // The ascii protocol doesn't support defaults, so I added them + // manually here. 
+ if(rv == -1) { + Future f=asyncStore(StoreType.add, + key, exp, String.valueOf(def)); + try { + if(f.get(operationTimeout, TimeUnit.MILLISECONDS)) { + rv=def; + } else { + rv=mutate(t, key, by, 0, exp); + assert rv != -1 : "Failed to mutate or init value"; + } + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for store", e); + } catch (ExecutionException e) { + throw new RuntimeException("Failed waiting for store", e); + } catch (TimeoutException e) { + throw new OperationTimeoutException( + "Timeout waiting to mutate or init value", e); + } + } + return rv; + } + + private Future asyncMutate(Mutator m, String key, int by, long def, + int exp) { + final CountDownLatch latch = new CountDownLatch(1); + final OperationFuture rv = new OperationFuture( + latch, operationTimeout); + Operation op = addOp(key, opFact.mutate(m, key, by, def, exp, + new OperationCallback() { + public void receivedStatus(OperationStatus s) { + rv.set(new Long(s.isSuccess() ? s.getMessage() : "-1")); + } + public void complete() { + latch.countDown(); + } + })); + rv.setOperation(op); + return rv; + } + + /** + * Asychronous increment. + * + * @param key key to increment + * @param by the amount to increment the value by + * @return a future with the incremented value, or -1 if the + * increment failed. + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncIncr(String key, int by) { + return asyncMutate(Mutator.incr, key, by, -1, 0); + } + + /** + * Asychronous increment. + * + * @param key key to increment + * @param by the amount to increment the value by + * @param def the default value (if the counter does not exist) + * @param exp the expiration of this object + * @return a future with the incremented value, or -1 if the + * increment failed. 
+ * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncIncr(String key, int by, long def, int exp) { + return asyncMutate(Mutator.incr, key, by, def, exp); + } + + /** + * Asynchronous decrement. + * + * @param key key to increment + * @param by the amount to increment the value by + * @return a future with the decremented value, or -1 if the + * increment failed. + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncDecr(String key, int by) { + return asyncMutate(Mutator.decr, key, by, -1, 0); + } + + /** + * Asynchronous decrement. + * + * @param key key to increment + * @param by the amount to increment the value by + * @param def the default value (if the counter does not exist) + * @param exp the expiration of this object + * @return a future with the decremented value, or -1 if the + * increment failed. + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future asyncDecr(String key, int by, long def, int exp) { + return asyncMutate(Mutator.decr, key, by, def, exp); + } + + /** + * Increment the given counter, returning the new value. + * + * @param key the key + * @param by the amount to increment + * @param def the default value (if the counter does not exist) + * @return the new value, or -1 if we were unable to increment or add + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long incr(String key, int by, long def) { + return mutateWithDefault(Mutator.incr, key, by, def, 0); + } + + /** + * Decrement the given counter, returning the new value. 
+ * + * @param key the key + * @param by the amount to decrement + * @param def the default value (if the counter does not exist) + * @return the new value, or -1 if we were unable to decrement or add + * @throws OperationTimeoutException if the global operation timeout is + * exceeded + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public long decr(String key, int by, long def) { + return mutateWithDefault(Mutator.decr, key, by, def, 0); + } + + /** + * Delete the given key from the cache. + * + *

+ * The hold argument specifies the amount of time in seconds (or Unix time + * until which) the client wishes the server to refuse "add" and "replace" + * commands with this key. For this amount of item, the item is put into a + * delete queue, which means that it won't possible to retrieve it by the + * "get" command, but "add" and "replace" command with this key will also + * fail (the "set" command will succeed, however). After the time passes, + * the item is finally deleted from server memory. + *

+ * + * @param key the key to delete + * @param hold how long the key should be unavailable to add commands + * + * @return whether or not the operation was performed + * @deprecated Hold values are no longer honored. + */ + @Deprecated + public Future delete(String key, int hold) { + return delete(key); + } + + /** + * Delete the given key from the cache. + * + * @param key the key to delete + * @return whether or not the operation was performed + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future delete(String key) { + final CountDownLatch latch=new CountDownLatch(1); + final OperationFuture rv=new OperationFuture(latch, + operationTimeout); + DeleteOperation op=opFact.delete(key, + new OperationCallback() { + public void receivedStatus(OperationStatus s) { + rv.set(s.isSuccess()); + } + public void complete() { + latch.countDown(); + }}); + rv.setOperation(op); + addOp(key, op); + return rv; + } + + /** + * Flush all caches from all servers with a delay of application. 
+ * @param delay the period of time to delay, in seconds + * @return whether or not the operation was accepted + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future flush(final int delay) { + final AtomicReference flushResult= + new AtomicReference(null); + final ConcurrentLinkedQueue ops= + new ConcurrentLinkedQueue(); + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory(){ + public Operation newOp(final MemcachedNode n, + final CountDownLatch latch) { + Operation op=opFact.flush(delay, new OperationCallback(){ + public void receivedStatus(OperationStatus s) { + flushResult.set(s.isSuccess()); + } + public void complete() { + latch.countDown(); + }}); + ops.add(op); + return op; + }}); + return new OperationFuture(blatch, flushResult, + operationTimeout) { + @Override + public boolean cancel(boolean ign) { + boolean rv=false; + for(Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + @Override + public boolean isCancelled() { + boolean rv=false; + for(Operation op : ops) { + rv |= op.isCancelled(); + } + return rv; + } + @Override + public boolean isDone() { + boolean rv=true; + for(Operation op : ops) { + rv &= op.getState() == OperationState.COMPLETE; + } + return rv || isCancelled(); + } + }; + } + + /** + * Flush all caches from all servers immediately. 
+ * @return whether or not the operation was performed + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public Future flush() { + return flush(-1); + } + + public Set listSaslMechanisms() { + final ConcurrentMap rv + = new ConcurrentHashMap(); + + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory() { + public Operation newOp(MemcachedNode n, + final CountDownLatch latch) { + return opFact.saslMechs(new OperationCallback() { + public void receivedStatus(OperationStatus status) { + for(String s : status.getMessage().split(" ")) { + rv.put(s, s); + } + } + public void complete() { + latch.countDown(); + } + }); + } + }); + + try { + blatch.await(); + } catch(InterruptedException e) { + Thread.currentThread().interrupt(); + } + + return rv.keySet(); + } + + private void logRunException(Exception e) { + if(shuttingDown) { + // There are a couple types of errors that occur during the + // shutdown sequence that are considered OK. Log at debug. + getLogger().debug("Exception occurred during shutdown", e); + } else { + getLogger().warn("Problem handling memcached IO", e); + } + } + + /** + * Infinitely loop processing IO. + */ + @Override + public void run() { + while(running) { + try { + conn.handleIO(); + } catch(IOException e) { + logRunException(e); + } catch(CancelledKeyException e) { + logRunException(e); + } catch(ClosedSelectorException e) { + logRunException(e); + } catch(IllegalStateException e) { + logRunException(e); + } + } + getLogger().info("Shut down memcached client"); + } + + /** + * Shut down immediately. + */ + public void shutdown() { + shutdown(-1, TimeUnit.MILLISECONDS); + } + + /** + * Shut down this client gracefully. + * + * @param timeout the amount of time time for shutdown + * @param unit the TimeUnit for the timeout + * @return result of the shutdown request + */ + public boolean shutdown(long timeout, TimeUnit unit) { + // Guard against double shutdowns (bug 8). 
+ if(shuttingDown) { + getLogger().info("Suppressing duplicate attempt to shut down"); + return false; + } + shuttingDown=true; + String baseName=getName(); + setName(baseName + " - SHUTTING DOWN"); + boolean rv=false; + try { + // Conditionally wait + if(timeout > 0) { + setName(baseName + " - SHUTTING DOWN (waiting)"); + rv=waitForQueues(timeout, unit); + } + } finally { + // But always begin the shutdown sequence + try { + setName(baseName + " - SHUTTING DOWN (telling client)"); + running=false; + conn.shutdown(); + setName(baseName + " - SHUTTING DOWN (informed client)"); + tcService.shutdown(); + } catch (IOException e) { + getLogger().warn("exception while shutting down", e); + } + } + return rv; + } + + /** + * Wait for the queues to die down. + * + * @param timeout the amount of time time for shutdown + * @param unit the TimeUnit for the timeout + * @return result of the request for the wait + * @throws IllegalStateException in the rare circumstance where queue + * is too full to accept any more requests + */ + public boolean waitForQueues(long timeout, TimeUnit unit) { + CountDownLatch blatch = broadcastOp(new BroadcastOpFactory(){ + public Operation newOp(final MemcachedNode n, + final CountDownLatch latch) { + return opFact.noop( + new OperationCallback() { + public void complete() { + latch.countDown(); + } + public void receivedStatus(OperationStatus s) { + // Nothing special when receiving status, only + // necessary to complete the interface + } + }); + }}, conn.getLocator().getAll(), false); + try { + // XXX: Perhaps IllegalStateException should be caught here + // and the check retried. + return blatch.await(timeout, unit); + } catch (InterruptedException e) { + throw new RuntimeException("Interrupted waiting for queues", e); + } + } + + /** + * Add a connection observer. + * + * If connections are already established, your observer will be called + * with the address and -1. 
+ * + * @param obs the ConnectionObserver you wish to add + * @return true if the observer was added. + */ + public boolean addObserver(ConnectionObserver obs) { + boolean rv = conn.addObserver(obs); + if(rv) { + for(MemcachedNode node : conn.getLocator().getAll()) { + if(node.isActive()) { + obs.connectionEstablished(node.getSocketAddress(), -1); + } + } + } + return rv; + } + + /** + * Remove a connection observer. + * + * @param obs the ConnectionObserver you wish to add + * @return true if the observer existed, but no longer does + */ + public boolean removeObserver(ConnectionObserver obs) { + return conn.removeObserver(obs); + } + + public void connectionEstablished(SocketAddress sa, int reconnectCount) { + if(authDescriptor != null) { + if (authDescriptor.authThresholdReached()) { + this.shutdown(); + } + authMonitor.authConnection(conn, opFact, authDescriptor, findNode(sa)); + } + } + + private MemcachedNode findNode(SocketAddress sa) { + MemcachedNode node = null; + for(MemcachedNode n : conn.getLocator().getAll()) { + if(n.getSocketAddress().equals(sa)) { + node = n; + } + } + assert node != null : "Couldn't find node connected to " + sa; + return node; + } + + public void connectionLost(SocketAddress sa) { + // Don't care. + } + + /** + * Returns current MemcachedConnection + * + * @return current MemcachedConnection + */ + public MemcachedConnection getMemcachedConnection() { + return this.conn; + } + + /** + * get current added queue size for mbean. 
+ * + * @return current added queue size + */ + int getAddedQueueSize() { + return conn.getAddedQueueSize(); + } + + /** + * get all memcachednode from node locator for mbean + * + * @return all memcachednode from node locator + */ + Collection getAllNodes() { + return conn.getLocator().getAll(); + } + + /** + * get current local cache manager + * @return + */ + public LocalCacheManager getLocalCacheManager() { + return localCacheManager; + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/MemcachedClientIF.java b/src/main/java/net/spy/memcached/MemcachedClientIF.java new file mode 100644 index 000000000..230924d51 --- /dev/null +++ b/src/main/java/net/spy/memcached/MemcachedClientIF.java @@ -0,0 +1,157 @@ +package net.spy.memcached; + +import java.net.SocketAddress; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.internal.BulkFuture; +import net.spy.memcached.transcoders.Transcoder; + +/** + * This interface is provided as a helper for testing clients of the MemcachedClient. + */ +public interface MemcachedClientIF { + /** + * Maximum supported key length. 
+ */ + int MAX_KEY_LENGTH = 250; + + Collection getAvailableServers(); + + Collection getUnavailableServers(); + + Transcoder getTranscoder(); + + NodeLocator getNodeLocator(); + + Future append(long cas, String key, Object val); + + Future append(long cas, String key, T val, + Transcoder tc); + + Future prepend(long cas, String key, Object val); + + Future prepend(long cas, String key, T val, + Transcoder tc); + + Future asyncCAS(String key, long casId, T value, + Transcoder tc); + + Future asyncCAS(String key, long casId, Object value); + + CASResponse cas(String key, long casId, T value, + Transcoder tc) throws OperationTimeoutException; + + CASResponse cas(String key, long casId, Object value) + throws OperationTimeoutException; + + Future add(String key, int exp, T o, Transcoder tc); + + Future add(String key, int exp, Object o); + + Future set(String key, int exp, T o, Transcoder tc); + + Future set(String key, int exp, Object o); + + Future replace(String key, int exp, T o, + Transcoder tc); + + Future replace(String key, int exp, Object o); + + Future asyncGet(String key, Transcoder tc); + + Future asyncGet(String key); + + Future> asyncGets(String key, + Transcoder tc); + + Future> asyncGets(String key); + + CASValue gets(String key, Transcoder tc) + throws OperationTimeoutException; + + CASValue gets(String key) throws OperationTimeoutException; + + T get(String key, Transcoder tc) + throws OperationTimeoutException; + + Object get(String key) throws OperationTimeoutException; + + BulkFuture> asyncGetBulk(Collection keys, + Iterator> tcs); + + BulkFuture> asyncGetBulk(Collection keys, + Transcoder tc); + + BulkFuture> asyncGetBulk(Collection keys); + + BulkFuture> asyncGetBulk(Transcoder tc, + String... keys); + + BulkFuture> asyncGetBulk(String... keys); + + Map getBulk(Collection keys, Transcoder tc) + throws OperationTimeoutException; + + Map getBulk(Collection keys) + throws OperationTimeoutException; + + Map getBulk(Transcoder tc, String... 
keys) + throws OperationTimeoutException; + + Map getBulk(String... keys) + throws OperationTimeoutException; + + Map getVersions(); + + Map> getStats(); + + Map> getStats(String prefix); + + long incr(String key, int by) throws OperationTimeoutException; + + long decr(String key, int by) throws OperationTimeoutException; + + long incr(String key, int by, long def, int exp) + throws OperationTimeoutException; + + long decr(String key, int by, long def, int exp) + throws OperationTimeoutException; + + Future asyncIncr(String key, int by); + + Future asyncDecr(String key, int by); + + long incr(String key, int by, long def) + throws OperationTimeoutException; + + long decr(String key, int by, long def) + throws OperationTimeoutException; + + Future delete(String key); + + Future flush(int delay); + + Future flush(); + + void shutdown(); + + boolean shutdown(long timeout, TimeUnit unit); + + boolean waitForQueues(long timeout, TimeUnit unit); + + boolean addObserver(ConnectionObserver obs); + + boolean removeObserver(ConnectionObserver obs); + + /** + * Get the set of SASL mechanisms supported by the servers. + * + * @return the union of all SASL mechanisms supported by the servers. + */ + Set listSaslMechanisms(); +} diff --git a/src/main/java/net/spy/memcached/MemcachedConnection.java b/src/main/java/net/spy/memcached/MemcachedConnection.java new file mode 100644 index 000000000..e3f36caae --- /dev/null +++ b/src/main/java/net/spy/memcached/MemcachedConnection.java @@ -0,0 +1,839 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.net.SocketException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.IdentityHashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingQueue; + +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.compat.log.LoggerFactory; +import net.spy.memcached.ops.KeyedOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationException; +import net.spy.memcached.ops.OperationState; + +/** + * Connection to a cluster of memcached servers. + */ +public final class MemcachedConnection extends SpyObject { + + // The number of empty selects we'll allow before assuming we may have + // missed one and should check the current selectors. 
This generally + // indicates a bug, but we'll check it nonetheless. + private static final int DOUBLE_CHECK_EMPTY = 256; + // The number of empty selects we'll allow before blowing up. It's too + // easy to write a bug that causes it to loop uncontrollably. This helps + // find those bugs and often works around them. + private static final int EXCESSIVE_EMPTY = 0x1000000; + + private volatile boolean shutDown=false; + // If true, optimization will collapse multiple sequential get ops + private final boolean shouldOptimize; + private Selector selector=null; + private final NodeLocator locator; + private final FailureMode failureMode; + // maximum amount of time to wait between reconnect attempts + private final long maxDelay; + private int emptySelects=0; + // AddedQueue is used to track the QueueAttachments for which operations + // have recently been queued. + private final ConcurrentLinkedQueue addedQueue; + // reconnectQueue contains the attachments that need to be reconnected + // The key is the time at which they are eligible for reconnect + private final SortedMap reconnectQueue; + private final Collection connObservers = + new ConcurrentLinkedQueue(); + private final OperationFactory opFact; + private final int timeoutExceptionThreshold; + + private BlockingQueue _nodeManageQueue = new LinkedBlockingQueue(); + private final ConnectionFactory f; + + /** + * Construct a memcached connection. 
+ * + * @param bufSize the size of the buffer used for reading from the server + * @param f the factory that will provide an operation queue + * @param a the addresses of the servers to connect to + * + * @throws IOException if a connection attempt fails early + */ + public MemcachedConnection(int bufSize, ConnectionFactory f, + List a, Collection obs, + FailureMode fm, OperationFactory opfactory) + throws IOException { + this.f = f; + connObservers.addAll(obs); + reconnectQueue=new TreeMap(); + addedQueue=new ConcurrentLinkedQueue(); + failureMode = fm; + shouldOptimize = f.shouldOptimize(); + maxDelay = f.getMaxReconnectDelay(); + opFact = opfactory; + timeoutExceptionThreshold = f.getTimeoutExceptionThreshold(); + selector=Selector.open(); + List connections=new ArrayList(a.size()); + for(SocketAddress sa : a) { + connections.add(attachMemcachedNode(sa)); + } + locator=f.createLocator(connections); + } + + private boolean selectorsMakeSense() { + for(MemcachedNode qa : locator.getAll()) { + if(qa.getSk() != null && qa.getSk().isValid()) { + if(qa.getChannel().isConnected()) { + int sops=qa.getSk().interestOps(); + int expected=0; + if(qa.hasReadOp()) { + expected |= SelectionKey.OP_READ; + } + if(qa.hasWriteOp()) { + expected |= SelectionKey.OP_WRITE; + } + if(qa.getBytesRemainingToWrite() > 0) { + expected |= SelectionKey.OP_WRITE; + } + assert sops == expected : "Invalid ops: " + + qa + ", expected " + expected + ", got " + sops; + } else { + int sops=qa.getSk().interestOps(); + assert sops == SelectionKey.OP_CONNECT + : "Not connected, and not watching for connect: " + + sops; + } + } + } + getLogger().debug("Checked the selectors."); + return true; + } + + /** + * MemcachedClient calls this method to handle IO over the connections. + */ + public void handleIO() throws IOException { + if(shutDown) { + throw new IOException("No IO while shut down"); + } + + // Deal with all of the stuff that's been added, but may not be marked + // writable. 
+ handleInputQueue(); + getLogger().debug("Done dealing with queue."); + + long delay=0; + if(!reconnectQueue.isEmpty()) { + long now=System.currentTimeMillis(); + long then=reconnectQueue.firstKey(); + delay=Math.max(then-now, 1); + } + getLogger().debug("Selecting with delay of %sms", delay); + assert selectorsMakeSense() : "Selectors don't make sense."; + int selected=selector.select(delay); + Set selectedKeys=selector.selectedKeys(); + + if(selectedKeys.isEmpty() && !shutDown) { + getLogger().debug("No selectors ready, interrupted: " + + Thread.interrupted()); + if(++emptySelects > DOUBLE_CHECK_EMPTY) { + for(SelectionKey sk : selector.keys()) { + getLogger().info("%s has %s, interested in %s", + sk, sk.readyOps(), sk.interestOps()); + if(sk.readyOps() != 0) { + getLogger().info("%s has a ready op, handling IO", sk); + handleIO(sk); + } else { + lostConnection((MemcachedNode)sk.attachment()); + } + } + assert emptySelects < EXCESSIVE_EMPTY + : "Too many empty selects"; + } + } else { + getLogger().debug("Selected %d, selected %d keys", + selected, selectedKeys.size()); + emptySelects=0; + + for(SelectionKey sk : selectedKeys) { + handleIO(sk); + } + + selectedKeys.clear(); + } + + // see if any connections blew up with large number of timeouts + for(SelectionKey sk : selector.keys()) { + MemcachedNode mn = (MemcachedNode)sk.attachment(); + if (mn.getContinuousTimeout() > timeoutExceptionThreshold) + { + getLogger().warn( + "%s exceeded continuous timeout threshold. >%s (%s)", + mn.getSocketAddress().toString(), timeoutExceptionThreshold, mn.getStatus()); + lostConnection(mn); + } + } + + // Deal with the memcached server group that's been added by CacheManager. + handleNodeManageQueue(); + + if(!shutDown && !reconnectQueue.isEmpty()) { + attemptReconnects(); + } + } + + public void updateConnections(List addrs) throws IOException { + List attachNodes = new ArrayList(); + List removeNodes = new ArrayList(); + + // Classify the incoming node list. 
+ for (MemcachedNode node : locator.getAll()) { + if (addrs.contains((InetSocketAddress) node.getSocketAddress())) { + addrs.remove((InetSocketAddress) node.getSocketAddress()); + } else { + removeNodes.add(node); + } + } + + // Make connections to the newly added nodes. + for (SocketAddress sa : addrs) { + attachNodes.add(attachMemcachedNode(sa)); + } + + // Remove unavailable nodes in the reconnect queue. + for (MemcachedNode node : removeNodes) { + getLogger().info("old memcached node removed %s", node); + for (Entry each : reconnectQueue.entrySet()) { + if (node.equals(each.getValue())) { + reconnectQueue.remove(each.getKey()); + break; + } + } + } + + // Update the hash. + locator.update(attachNodes, removeNodes); + } + + MemcachedNode attachMemcachedNode(SocketAddress sa) throws IOException { + SocketChannel ch = SocketChannel.open(); + ch.configureBlocking(false); + // bufSize : 16384 (default value) + MemcachedNode qa = + f.createMemcachedNode(sa, ch, f.getReadBufSize()); + int ops = 0; + ch.socket().setTcpNoDelay(!f.useNagleAlgorithm()); + ch.socket().setReuseAddress(true); + // Initially I had attempted to skirt this by queueing every + // connect, but it considerably slowed down start time. + try { + if (ch.connect(sa)) { + getLogger().info("new memcached node connected to %s immediately", qa); + qa.connected(); + } else { + getLogger().info("new memcached node added %s to connect queue", qa); + ops = SelectionKey.OP_CONNECT; + } + qa.setSk(ch.register(selector, ops, qa)); + assert ch.isConnected() + || qa.getSk().interestOps() == SelectionKey.OP_CONNECT + : "Not connected, and not wanting to connect"; + } catch (SocketException e) { + getLogger().warn("new memcached socket error on initial connect"); + queueReconnect(qa); + } + return qa; + } + + public void putMemcachedQueue(String addrs) { + _nodeManageQueue.offer(addrs); + } + + // Handle the memcached server group that's been added by CacheManager. 
+ void handleNodeManageQueue() throws IOException { + if (_nodeManageQueue.isEmpty()) { + return; + } + + // Get addresses from the queue + String addrs = _nodeManageQueue.poll(); + + // Update the memcached server group. + updateConnections(AddrUtil.getAddresses(addrs)); + } + + // Handle any requests that have been made against the client. + private void handleInputQueue() { + if(!addedQueue.isEmpty()) { + getLogger().debug("Handling queue"); + // If there's stuff in the added queue. Try to process it. + Collection toAdd=new HashSet(); + // Transfer the queue into a hashset. There are very likely more + // additions than there are nodes. + Collection todo=new HashSet(); + try { + MemcachedNode qa=null; + while((qa=addedQueue.remove()) != null) { + todo.add(qa); + } + } catch(NoSuchElementException e) { + // Found everything + } + + // Now process the queue. + for(MemcachedNode qa : todo) { + boolean readyForIO=false; + if(qa.isActive()) { + if(qa.getCurrentWriteOp() != null) { + readyForIO=true; + getLogger().debug("Handling queued write %s", qa); + } + } else { + toAdd.add(qa); + } + qa.copyInputQueue(); + if(readyForIO) { + try { + if(qa.getWbuf().hasRemaining()) { + handleWrites(qa.getSk(), qa); + } + } catch(IOException e) { + getLogger().warn("Exception handling write", e); + lostConnection(qa); + } + } + qa.fixupOps(); + } + addedQueue.addAll(toAdd); + } + } + + /** + * Add a connection observer. + * + * @return whether the observer was successfully added + */ + public boolean addObserver(ConnectionObserver obs) { + return connObservers.add(obs); + } + + /** + * Remove a connection observer. 
+ * + * @return true if the observer existed and now doesn't + */ + public boolean removeObserver(ConnectionObserver obs) { + return connObservers.remove(obs); + } + + private void connected(MemcachedNode qa) { + assert qa.getChannel().isConnected() : "Not connected."; + int rt = qa.getReconnectCount(); + qa.connected(); + for(ConnectionObserver observer : connObservers) { + observer.connectionEstablished(qa.getSocketAddress(), rt); + } + } + + private void lostConnection(MemcachedNode qa) { + queueReconnect(qa); + for(ConnectionObserver observer : connObservers) { + observer.connectionLost(qa.getSocketAddress()); + } + } + + // Handle IO for a specific selector. Any IOException will cause a + // reconnect + private void handleIO(SelectionKey sk) { + MemcachedNode qa=(MemcachedNode)sk.attachment(); + try { + getLogger().debug( + "Handling IO for: %s (r=%s, w=%s, c=%s, op=%s)", + sk, sk.isReadable(), sk.isWritable(), + sk.isConnectable(), sk.attachment()); + if(sk.isConnectable()) { + getLogger().info("Connection state changed for %s", sk); + final SocketChannel channel=qa.getChannel(); + if(channel.finishConnect()) { + connected(qa); + addedQueue.offer(qa); + if(qa.getWbuf().hasRemaining()) { + handleWrites(sk, qa); + } + } else { + assert !channel.isConnected() : "connected"; + } + } else { + if(sk.isValid() && sk.isReadable()) { + handleReads(sk, qa); + } + if(sk.isValid() && sk.isWritable()) { + handleWrites(sk, qa); + } + } + } catch(ClosedChannelException e) { + // Note, not all channel closes end up here + if(!shutDown) { + getLogger().info("Closed channel and not shutting down. " + + "Queueing reconnect on %s", qa, e); + lostConnection(qa); + } + } catch(ConnectException e) { + // Failures to establish a connection should attempt a reconnect + // without signaling the observers. 
+ getLogger().info("Reconnecting due to failure to connect to %s", + qa, e); + queueReconnect(qa); + } catch (OperationException e) { + qa.setupForAuth(); // noop if !shouldAuth + getLogger().info("Reconnection due to exception " + + "handling a memcached operation on %s. " + + "This may be due to an authentication failure.", qa, e); + lostConnection(qa); + } catch(Exception e) { + // Any particular error processing an item should simply + // cause us to reconnect to the server. + // + // One cause is just network oddness or servers + // restarting, which lead here with IOException + + qa.setupForAuth(); // noop if !shouldAuth + getLogger().info("Reconnecting due to exception on %s", qa, e); + lostConnection(qa); + } + qa.fixupOps(); + } + + private void handleWrites(SelectionKey sk, MemcachedNode qa) + throws IOException { + qa.fillWriteBuffer(shouldOptimize); + boolean canWriteMore=qa.getBytesRemainingToWrite() > 0; + while(canWriteMore) { + int wrote=qa.writeSome(); + qa.fillWriteBuffer(shouldOptimize); + canWriteMore = wrote > 0 && qa.getBytesRemainingToWrite() > 0; + } + } + + private void handleReads(SelectionKey sk, MemcachedNode qa) + throws IOException { + Operation currentOp = qa.getCurrentReadOp(); + ByteBuffer rbuf=qa.getRbuf(); + final SocketChannel channel = qa.getChannel(); + int read=channel.read(rbuf); + if (read < 0) { + // our model is to keep the connection alive for future ops + // so we'll queue a reconnect if disconnected via an IOException + throw new IOException("Disconnected unexpected, will reconnect."); + } + while(read > 0) { + getLogger().debug("Read %d bytes", read); + rbuf.flip(); + while(rbuf.remaining() > 0) { + if(currentOp == null) { + throw new IllegalStateException("No read operation."); + } + currentOp.readFromBuffer(rbuf); + if(currentOp.getState() == OperationState.COMPLETE) { + getLogger().debug( + "Completed read op: %s and giving the next %d bytes", + currentOp, rbuf.remaining()); + Operation op=qa.removeCurrentReadOp(); 
+ assert op == currentOp + : "Expected to pop " + currentOp + " got " + op; + currentOp=qa.getCurrentReadOp(); + } + } + rbuf.clear(); + read=channel.read(rbuf); + } + } + + // Make a debug string out of the given buffer's values + static String dbgBuffer(ByteBuffer b, int size) { + StringBuilder sb=new StringBuilder(); + byte[] bytes=b.array(); + for(int i=0; i ops) { + for(Operation op : ops) { + op.cancel(); + } + } + + private void redistributeOperations(Collection ops) { + for(Operation op : ops) { + if(op instanceof KeyedOperation) { + KeyedOperation ko = (KeyedOperation)op; + int added = 0; + for(String k : ko.getKeys()) { + for(Operation newop : opFact.clone(ko)) { + addOperation(k, newop); + added++; + } + } + assert added > 0 + : "Didn't add any new operations when redistributing"; + } else { + // Cancel things that don't have definite targets. + op.cancel(); + } + } + } + + private void attemptReconnects() throws IOException { + final long now=System.currentTimeMillis(); + final Map seen= + new IdentityHashMap(); + final List rereQueue=new ArrayList(); + SocketChannel ch = null; + for(Iterator i= + reconnectQueue.headMap(now).values().iterator(); i.hasNext();) { + final MemcachedNode qa=i.next(); + i.remove(); + try { + if(!seen.containsKey(qa)) { + seen.put(qa, Boolean.TRUE); + getLogger().info("Reconnecting %s", qa); + ch=SocketChannel.open(); + ch.configureBlocking(false); + int ops=0; + if(ch.connect(qa.getSocketAddress())) { + getLogger().info("Immediately reconnected to %s", qa); + assert ch.isConnected(); + } else { + ops=SelectionKey.OP_CONNECT; + } + qa.registerChannel(ch, ch.register(selector, ops, qa)); + assert qa.getChannel() == ch : "Channel was lost."; + } else { + getLogger().debug( + "Skipping duplicate reconnect request for %s", qa); + } + } catch(SocketException e) { + getLogger().warn("Error on reconnect", e); + rereQueue.add(qa); + } + catch (Exception e) { + getLogger().error("Exception on reconnect, lost node %s", qa, e); + } 
finally { + //it's possible that above code will leak file descriptors under abnormal + //conditions (when ch.open() fails and throws IOException. + //always close non connected channel + if (ch != null && !ch.isConnected() + && !ch.isConnectionPending()) { + try { + ch.close(); + } catch (IOException x) { + getLogger().error("Exception closing channel: %s", qa, x); + } + } + } + } + // Requeue any fast-failed connects. + for(MemcachedNode n : rereQueue) { + queueReconnect(n); + } + } + + /** + * Get the node locator used by this connection. + */ + NodeLocator getLocator() { + return locator; + } + + Selector getSelector() { + return selector; + } + + /** + * Add an operation to the given connection. + * + * @param key the key the operation is operating upon + * @param o the operation + */ + public void addOperation(final String key, final Operation o) { + MemcachedNode placeIn=null; + MemcachedNode primary = locator.getPrimary(key); + if(primary.isActive() || failureMode == FailureMode.Retry) { + placeIn=primary; + } else if(failureMode == FailureMode.Cancel) { + o.setHandlingNode(primary); + o.cancel(); + } else { + // Look for another node in sequence that is ready. + for(Iterator i=locator.getSequence(key); + placeIn == null && i.hasNext(); ) { + MemcachedNode n=i.next(); + if(n.isActive()) { + placeIn=n; + } + } + // If we didn't find an active node, queue it in the primary node + // and wait for it to come back online. 
+ if(placeIn == null) { + placeIn = primary; + } + } + + assert o.isCancelled() || placeIn != null + : "No node found for key " + key; + if(placeIn != null) { + addOperation(placeIn, o); + } else { + assert o.isCancelled() : "No not found for " + + key + " (and not immediately cancelled)"; + } + } + + public void insertOperation(final MemcachedNode node, final Operation o) { + o.setHandlingNode(node); + o.initialize(); + node.insertOp(o); + addedQueue.offer(node); + Selector s=selector.wakeup(); + assert s == selector : "Wakeup returned the wrong selector."; + getLogger().debug("Added %s to %s", o, node); + } + + public void addOperation(final MemcachedNode node, final Operation o) { + o.setHandlingNode(node); + o.initialize(); + node.addOp(o); + addedQueue.offer(node); + Selector s=selector.wakeup(); + assert s == selector : "Wakeup returned the wrong selector."; + getLogger().debug("Added %s to %s", o, node); + } + + public void addOperations(final Map ops) { + + for(Map.Entry me : ops.entrySet()) { + final MemcachedNode node=me.getKey(); + Operation o=me.getValue(); + o.setHandlingNode(node); + o.initialize(); + node.addOp(o); + addedQueue.offer(node); + } + Selector s=selector.wakeup(); + assert s == selector : "Wakeup returned the wrong selector."; + } + + /** + * Broadcast an operation to all nodes. + */ + public CountDownLatch broadcastOperation(BroadcastOpFactory of) { + return broadcastOperation(of, locator.getAll()); + } + + /** + * Broadcast an operation to a specific collection of nodes. 
+ */ + public CountDownLatch broadcastOperation(final BroadcastOpFactory of, + Collection nodes) { + final CountDownLatch latch=new CountDownLatch(locator.getAll().size()); + for(MemcachedNode node : nodes) { + Operation op = of.newOp(node, latch); + op.initialize(); + node.addOp(op); + op.setHandlingNode(node); + addedQueue.offer(node); + } + Selector s=selector.wakeup(); + assert s == selector : "Wakeup returned the wrong selector."; + return latch; + } + + /** + * Shut down all of the connections. + */ + public void shutdown() throws IOException { + shutDown=true; + Selector s=selector.wakeup(); + assert s == selector : "Wakeup returned the wrong selector."; + for(MemcachedNode qa : locator.getAll()) { + qa.shutdown(); + } + selector.close(); + getLogger().debug("Shut down selector %s", selector); + } + + @Override + public String toString() { + StringBuilder sb=new StringBuilder(); + sb.append("{MemcachedConnection to"); + for(MemcachedNode qa : locator.getAll()) { + sb.append(" "); + sb.append(qa.getSocketAddress()); + } + sb.append("}"); + return sb.toString(); + } + + /** + * helper method: increase timeout count on node attached to this op + * + * @param op + */ + public static void opTimedOut(Operation op) { + MemcachedConnection.setTimeout(op, true); + } + + /** + * helper method: reset timeout counter + * + * @param op + */ + public static void opSucceeded(Operation op) { + MemcachedConnection.setTimeout(op, false); + } + + /** + * helper method: do some error checking and set timeout boolean + * + * @param op + * @param isTimeout + */ + private static void setTimeout(Operation op, boolean isTimeout) { + try { + if (op == null) { + LoggerFactory.getLogger(MemcachedConnection.class).debug("op is null."); + return; // op may be null in some cases, e.g. 
flush + } + MemcachedNode node = op.getHandlingNode(); + if (node == null) { + LoggerFactory.getLogger(MemcachedConnection.class).debug("handling node for operation is not set"); + } + else { + node.setContinuousTimeout(isTimeout); + } + } catch (Exception e) { + LoggerFactory.getLogger(MemcachedConnection.class).error(e.getMessage()); + } + } + + /** + * find memcachednode for key + * @param key + * @return + */ + public MemcachedNode findNodeByKey(String key) { + MemcachedNode placeIn = null; + MemcachedNode primary = locator.getPrimary(key); + if (primary.isActive() || failureMode == FailureMode.Retry) { + placeIn = primary; + } else { + for (Iterator i = locator.getSequence(key); placeIn == null + && i.hasNext();) { + MemcachedNode n = i.next(); + if (n.isActive()) { + placeIn = n; + } + } + if (placeIn == null) { + placeIn = primary; + } + } + return placeIn; + } + + public int getAddedQueueSize() { + return addedQueue.size(); + } +} diff --git a/src/main/java/net/spy/memcached/MemcachedNode.java b/src/main/java/net/spy/memcached/MemcachedNode.java new file mode 100644 index 000000000..86ab04b1c --- /dev/null +++ b/src/main/java/net/spy/memcached/MemcachedNode.java @@ -0,0 +1,233 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.Collection; + +import net.spy.memcached.ops.Operation; + +/** + * Interface defining a connection to a memcached server. + */ +public interface MemcachedNode { + + /** + * Move all of the operations delivered via addOperation into the internal + * write queue. + */ + void copyInputQueue(); + + /** + * Extract all queued items for this node destructively. + * + * This is useful for redistributing items. + */ + Collection destroyInputQueue(); + + /** + * Clear the queue of currently processing operations by either cancelling + * them or setting them up to be reapplied after a reconnect. + */ + void setupResend(); + + /** + * Fill the write buffer with data from the next operations in the queue. + * + * @param optimizeGets if true, combine sequential gets into a single + * multi-key get + */ + void fillWriteBuffer(boolean optimizeGets); + + /** + * Transition the current write item into a read state. + */ + void transitionWriteItem(); + + /** + * Get the operation at the top of the queue that is requiring input. + */ + Operation getCurrentReadOp(); + + /** + * Remove the operation at the top of the queue that is requiring input. + */ + Operation removeCurrentReadOp(); + + /** + * Get the operation at the top of the queue that has information available + * to write. + */ + Operation getCurrentWriteOp(); + + /** + * Remove the operation at the top of the queue that has information + * available to write. + */ + Operation removeCurrentWriteOp(); + + /** + * True if an operation is available to read. + */ + boolean hasReadOp(); + + /** + * True if an operation is available to write. + */ + boolean hasWriteOp(); + + /** + * Add an operation to the queue. Authentication operations should + * never be added to the queue, but this is not checked. 
+ */ + void addOp(Operation op); + + /** + * Insert an operation to the beginning of the queue. + * + * This method is meant to be invoked rarely. + */ + void insertOp(Operation o); + + /** + * Compute the appropriate selection operations for the channel this + * MemcachedNode holds to the server. + */ + int getSelectionOps(); + + /** + * Get the buffer used for reading data from this node. + */ + ByteBuffer getRbuf(); + + /** + * Get the buffer used for writing data to this node. + */ + ByteBuffer getWbuf(); + + /** + * Get the SocketAddress of the server to which this node is connected. + */ + SocketAddress getSocketAddress(); + + /** + * True if this node is active. i.e. is is currently connected + * and expected to be able to process requests + */ + boolean isActive(); + + /** + * Notify this node that it will be reconnecting. + */ + void reconnecting(); + + /** + * Notify this node that it has reconnected. + */ + void connected(); + + /** + * Get the current reconnect count. + */ + int getReconnectCount(); + + /** + * Register a channel with this node. + */ + void registerChannel(SocketChannel ch, SelectionKey selectionKey); + + /** + * Set the SocketChannel this node uses. + */ + void setChannel(SocketChannel to); + + /** + * Get the SocketChannel for this connection. + */ + SocketChannel getChannel(); + + /** + * Set the selection key for this node. + */ + void setSk(SelectionKey to); + + /** + * Get the selection key from this node. + */ + SelectionKey getSk(); + + /** + * Get the number of bytes remaining to write. + */ + int getBytesRemainingToWrite(); + + /** + * Write some bytes and return the number of bytes written. + * @return the number of bytes written + * @throws IOException if there's a problem writing + */ + int writeSome() throws IOException; + + /** + * Fix up the selection ops on the selection key. + */ + void fixupOps(); + + /** + * Let the node know that auth is complete. 
Typically this would + * mean the node can start processing and accept new operations to + * its input queue. + */ + void authComplete(); + + /** + * Tell a node to set up for authentication. Typically this would + * mean blocking additions to the queue. In a reconnect situation + * this may mean putting any queued operations on hold to get to + * an auth complete state. + */ + void setupForAuth(); + + /** + * Count 'time out' exceptions to drop connections that fail perpetually + * @param timedOut + */ + void setContinuousTimeout(boolean timedOut); + + int getContinuousTimeout(); + + /** + * Is this a fake node? + * @return + */ + boolean isFake(); + + /** + * Shutdown the node + */ + void shutdown() throws IOException; + + /** + * get operation queue status + * @return + */ + String getStatus(); +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/MemcachedNodeROImpl.java b/src/main/java/net/spy/memcached/MemcachedNodeROImpl.java new file mode 100644 index 000000000..665553713 --- /dev/null +++ b/src/main/java/net/spy/memcached/MemcachedNodeROImpl.java @@ -0,0 +1,188 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * + */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.Collection; + +import net.spy.memcached.ops.Operation; + +class MemcachedNodeROImpl implements MemcachedNode { + + private final MemcachedNode root; + + public MemcachedNodeROImpl(MemcachedNode n) { + super(); + root=n; + } + + @Override + public String toString() { + return root.toString(); + } + + public void addOp(Operation op) { + throw new UnsupportedOperationException(); + } + + public void insertOp(Operation op) { + throw new UnsupportedOperationException(); + } + + public void connected() { + throw new UnsupportedOperationException(); + } + + public void copyInputQueue() { + throw new UnsupportedOperationException(); + } + + public void fillWriteBuffer(boolean optimizeGets) { + throw new UnsupportedOperationException(); + } + + public void fixupOps() { + throw new UnsupportedOperationException(); + } + + public int getBytesRemainingToWrite() { + return root.getBytesRemainingToWrite(); + } + + public SocketChannel getChannel() { + throw new UnsupportedOperationException(); + } + + public Operation getCurrentReadOp() { + throw new UnsupportedOperationException(); + } + + public Operation getCurrentWriteOp() { + throw new UnsupportedOperationException(); + } + + public ByteBuffer getRbuf() { + throw new UnsupportedOperationException(); + } + + public int getReconnectCount() { + return root.getReconnectCount(); + } + + public int getSelectionOps() { + return root.getSelectionOps(); + } + + public SelectionKey getSk() { + throw new UnsupportedOperationException(); + } + + public SocketAddress getSocketAddress() { + return root.getSocketAddress(); + } + + public ByteBuffer getWbuf() { + throw new UnsupportedOperationException(); + } + + public boolean hasReadOp() { + return root.hasReadOp(); + } + + public boolean hasWriteOp() { 
+ return root.hasWriteOp(); + } + + public boolean isActive() { + return root.isActive(); + } + + public void reconnecting() { + throw new UnsupportedOperationException(); + } + + public void registerChannel(SocketChannel ch, SelectionKey selectionKey) { + throw new UnsupportedOperationException(); + } + + public Operation removeCurrentReadOp() { + throw new UnsupportedOperationException(); + } + + public Operation removeCurrentWriteOp() { + throw new UnsupportedOperationException(); + } + + public void setChannel(SocketChannel to) { + throw new UnsupportedOperationException(); + } + + public void setSk(SelectionKey to) { + throw new UnsupportedOperationException(); + } + + public void setupResend() { + throw new UnsupportedOperationException(); + } + + public void transitionWriteItem() { + throw new UnsupportedOperationException(); + } + + public int writeSome() throws IOException { + throw new UnsupportedOperationException(); + } + + public Collection destroyInputQueue() { + throw new UnsupportedOperationException(); + } + + public void authComplete() { + throw new UnsupportedOperationException(); + } + + public void setupForAuth() { + throw new UnsupportedOperationException(); + } + + public int getContinuousTimeout() { + throw new UnsupportedOperationException(); + } + + public void setContinuousTimeout(boolean isIncrease) { + throw new UnsupportedOperationException(); + } + + public boolean isFake() { + throw new UnsupportedOperationException(); + } + + public void shutdown() throws IOException { + throw new UnsupportedOperationException(); + } + + public String getStatus() { + throw new UnsupportedOperationException(); + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/NodeLocator.java b/src/main/java/net/spy/memcached/NodeLocator.java new file mode 100644 index 000000000..76ef50c53 --- /dev/null +++ b/src/main/java/net/spy/memcached/NodeLocator.java @@ -0,0 +1,60 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 
2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.Collection; +import java.util.Iterator; + +/** + * Interface for locating a node by hash value. + */ +public interface NodeLocator { + + /** + * Get the primary location for the given key. + * + * @param k the object key + * @return the QueueAttachment containing the primary storage for a key + */ + MemcachedNode getPrimary(String k); + + /** + * Get an iterator over the sequence of nodes that make up the backup + * locations for a given key. + * + * @param k the object key + * @return the sequence of backup nodes. + */ + Iterator getSequence(String k); + + /** + * Get all memcached nodes. This is useful for broadcasting messages. + */ + Collection getAll(); + + /** + * Create a read-only copy of this NodeLocator. + */ + NodeLocator getReadonlyCopy(); + + /** + * Update all memcached nodes. Note that this feature is + * only available in ArcusKetamaNodeLocator. + * @param nodes + */ + void update(Collection toAttach, Collection toDelete); +} diff --git a/src/main/java/net/spy/memcached/NotExistsServiceCodeException.java b/src/main/java/net/spy/memcached/NotExistsServiceCodeException.java new file mode 100644 index 000000000..bd5b720e2 --- /dev/null +++ b/src/main/java/net/spy/memcached/NotExistsServiceCodeException.java @@ -0,0 +1,27 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +public class NotExistsServiceCodeException extends RuntimeException { + + private static final long serialVersionUID = -1461409015284668292L; + + public NotExistsServiceCodeException(String message) { + super(message); + } + +} diff --git a/src/main/java/net/spy/memcached/OperationFactory.java b/src/main/java/net/spy/memcached/OperationFactory.java new file mode 100644 index 000000000..c995a63ba --- /dev/null +++ b/src/main/java/net/spy/memcached/OperationFactory.java @@ -0,0 +1,473 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BTreeFindPosition; +import net.spy.memcached.collection.BTreeGetBulk; +import net.spy.memcached.collection.BTreeGetByPosition; +import net.spy.memcached.collection.BTreeSMGet; +import net.spy.memcached.collection.BTreeStoreAndGet; +import net.spy.memcached.collection.CollectionBulkStore; +import net.spy.memcached.collection.CollectionCount; +import net.spy.memcached.collection.CollectionCreate; +import net.spy.memcached.collection.CollectionDelete; +import net.spy.memcached.collection.CollectionExist; +import net.spy.memcached.collection.CollectionGet; +import net.spy.memcached.collection.CollectionMutate; +import net.spy.memcached.collection.CollectionPipedStore; +import net.spy.memcached.collection.CollectionPipedUpdate; +import net.spy.memcached.collection.CollectionStore; +import net.spy.memcached.collection.CollectionUpdate; +import net.spy.memcached.collection.SetPipedExist; +import net.spy.memcached.ops.BTreeFindPositionOperation; +import net.spy.memcached.ops.BTreeGetBulkOperation; +import net.spy.memcached.ops.BTreeGetByPositionOperation; +import net.spy.memcached.ops.BTreeSortMergeGetOperation; +import net.spy.memcached.ops.BTreeStoreAndGetOperation; +import net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.CollectionBulkStoreOperation; +import net.spy.memcached.ops.CollectionCountOperation; +import net.spy.memcached.ops.CollectionCreateOperation; +import net.spy.memcached.ops.CollectionDeleteOperation; +import net.spy.memcached.ops.CollectionExistOperation; +import net.spy.memcached.ops.CollectionGetOperation; +import net.spy.memcached.ops.CollectionMutateOperation; +import net.spy.memcached.ops.CollectionPipedExistOperation; +import net.spy.memcached.ops.CollectionPipedStoreOperation; 
+import net.spy.memcached.ops.CollectionPipedUpdateOperation; +import net.spy.memcached.ops.CollectionStoreOperation; +import net.spy.memcached.ops.CollectionUpdateOperation; +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.ExtendedBTreeGetOperation; +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.GetAttrOperation; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.KeyedOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.NoopOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.SASLAuthOperation; +import net.spy.memcached.ops.SASLMechsOperation; +import net.spy.memcached.ops.SASLStepOperation; +import net.spy.memcached.ops.SetAttrOperation; +import net.spy.memcached.ops.StatsOperation; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; +import net.spy.memcached.ops.VersionOperation; + +/** + * Factory that builds operations for protocol handlers. + */ +public interface OperationFactory { + + /** + * Create a NOOP operation. + * + * @param cb the operation callback + * @return the new NoopOperation + */ + NoopOperation noop(OperationCallback cb); + + /** + * Create a deletion operation. + * + * @param key the key to delete + * @param operationCallback the status callback + * @return the new DeleteOperation + */ + DeleteOperation delete(String key, OperationCallback operationCallback); + + /** + * Create a flush operation. + * + * @param delay delay until flush. + * @param operationCallback the status callback + * @return the new FlushOperation + */ + FlushOperation flush(int delay, OperationCallback operationCallback); + + /** + * Create a get operation. 
+ * + * @param key the key to get + * @param callback the callback that will contain the results + * @return a new GetOperation + */ + GetOperation get(String key, GetOperation.Callback callback); + + /** + * Create a gets operation. + * + * @param key the key to get + * @param callback the callback that will contain the results + * @return a new GetsOperation + */ + GetsOperation gets(String key, GetsOperation.Callback callback); + + + /** + * Create a get operation. + * + * @param keys the collection of keys to get + * @param cb the callback that will contain the results + * @return a new GetOperation + */ + GetOperation get(Collection keys, GetOperation.Callback cb); + + /** + * Create a mutator operation. + * + * @param m the mutator type + * @param key the mutatee key + * @param by the amount to increment or decrement + * @param def the default value + * @param exp expiration in case we need to default (0 if no default) + * @param cb the status callback + * @return the new mutator operation + */ + MutatorOperation mutate(Mutator m, String key, int by, + long def, int exp, OperationCallback cb); + + /** + * Get a new StatsOperation. + * + * @param arg the stat parameter (see protocol docs) + * @param cb the stats callback + * @return the new StatsOperation + */ + StatsOperation stats(String arg, StatsOperation.Callback cb); + + /** + * Create a store operation. + * + * @param storeType the type of store operation + * @param key the key to store + * @param flags the storage flags + * @param exp the expiration time + * @param data the data + * @param cb the status callback + * @return the new store operation + */ + StoreOperation store(StoreType storeType, String key, int flags, int exp, + byte[] data, OperationCallback cb); + + /** + * Get a concatenation operation. + * + * @param catType the type of concatenation to perform. 
+ * @param key the key + * @param casId the CAS value for an atomic compare-and-cat + * @param data the data to store + * @param cb a callback for reporting the status + * @return thew new ConcatenationOperation + */ + ConcatenationOperation cat(ConcatenationType catType, long casId, + String key, byte[] data, OperationCallback cb); + + /** + * Create a CAS operation. + * + * @param key the key to store + * @param casId the CAS identifier value (from a gets operation) + * @param flags the storage flags + * @param exp the expiration time + * @param data the data + * @param cb the status callback + * @return the new store operation + */ + CASOperation cas(StoreType t, String key, long casId, int flags, + int exp, byte[] data, OperationCallback cb); + + /** + * Create a new version operation. + */ + VersionOperation version(OperationCallback cb); + + /** + * Create a new SASL mechs operation. + */ + SASLMechsOperation saslMechs(OperationCallback cb); + + /** + * Create a new sasl auth operation. + */ + SASLAuthOperation saslAuth(String[] mech, String serverName, + Map props, CallbackHandler cbh, OperationCallback cb); + + /** + * Create a new sasl step operation. 
+ */ + SASLStepOperation saslStep(String[] mech, byte[] challenge, + String serverName, Map props, CallbackHandler cbh, + OperationCallback cb); + + /** + * + * @param key + * @param attrs + * @param cb + * @return + */ + SetAttrOperation setAttr(String key, Attributes attrs, + OperationCallback cb); + + /** + * + * @param key + * @param cb + * @return + */ + GetAttrOperation getAttr(String key, GetAttrOperation.Callback cb); + + /** + * + * @param key + * @param subkey + * @param collectionStore + * @param data + * @param cb + * @return + */ + CollectionStoreOperation collectionStore(String key, String subkey, + CollectionStore collectionStore, byte[] data, OperationCallback cb); + + /** + * + * @param key + * @param store + * @param cb + * @return + */ + CollectionPipedStoreOperation collectionPipedStore(String key, + CollectionPipedStore store, OperationCallback cb); + + /** + * + * @param key + * @param collectionGet + * @param cb + * @return + */ + CollectionGetOperation collectionGet(String key, + CollectionGet collectionGet, CollectionGetOperation.Callback cb); + + /** + * + * @param key + * @param collectionGet + * @param cb + * @return + */ + CollectionGetOperation collectionGet2(String key, + CollectionGet collectionGet, ExtendedBTreeGetOperation.Callback cb); + + /** + * + * @param key + * @param collectionDelete + * @param cb + * @return + */ + CollectionDeleteOperation collectionDelete(String key, + CollectionDelete collectionDelete, OperationCallback cb); + + /** + * + * @param key + * @param subkey + * @param setPipedExist + * @param data + * @param cb + * @return + */ + CollectionExistOperation collectionExist(String key, String subkey, + CollectionExist collectionExist, OperationCallback cb); + + /** + * Clone an operation. + * + *

+ * This is used for requeueing operations after a server is found to be + * down. + *

+ * + *

+ * Note that it returns more than one operation because a multi-get + * could potentially need to be played against a large number of + * underlying servers. In this case, there's a separate operation for + * each, and callback fa\u00E7ade to reassemble them. It is left up to + * the operation pipeline to perform whatever optimization is required + * to turn these back into multi-gets. + *

+ * + * @param op the operation to clone + * @return a new operation for each key in the original operation + */ + Collection clone(KeyedOperation op); + + /** + * + * @param key + * @param collectionCreate + * @param cb + * @return + */ + CollectionCreateOperation collectionCreate(String key, + CollectionCreate collectionCreate, OperationCallback cb); + + /** + * + * @param key + * @param collectionCount + * @param cb + * @return + */ + CollectionCountOperation collectionCount(String key, + CollectionCount collectionCount, OperationCallback cb); + + /** + * + * @param prefix + * @param delay delay until flush. + * @param noreply + * @param cb + * @return + */ + FlushOperation flush(String prefix, int delay, boolean noreply, OperationCallback cb); + + /** + * + * @param smget + * @param cb + * @return + */ + BTreeSortMergeGetOperation bopsmget(BTreeSMGet smGet, BTreeSortMergeGetOperation.Callback cb); + + /** + * + * @param key + * @param subkey + * @param collectionStore + * @param data + * @param cb + * @return + */ + CollectionStoreOperation collectionUpsert(String key, String subkey, + CollectionStore collectionStore, byte[] data, OperationCallback cb); + + /** + * + * @param key + * @param subkey + * @param collectionUpdate + * @param data + * @param cb + * @return + */ + CollectionUpdateOperation collectionUpdate(String key, String subkey, + CollectionUpdate collectionUpdate, byte[] data, OperationCallback cb); + + /** + * + * @param key + * @param update + * @param cb + * @return + */ + CollectionPipedUpdateOperation collectionPipedUpdate(String key, + CollectionPipedUpdate update, OperationCallback cb); + + /** + * + * @param key + * @param subkey + * @param collectionMutate + * @param cb + * @return + */ + CollectionMutateOperation collectionMutate(String key, String subkey, + CollectionMutate collectionMutate, OperationCallback cb); + + /** + * + * @param key + * @param exist + * @param cb + * @return + */ + CollectionPipedExistOperation 
collectionPipedExist(String key, + SetPipedExist exist, OperationCallback cb); + + /** + * + * @param key + * @param store + * @param cb + * @return + */ + CollectionBulkStoreOperation collectionBulkStore(List key, + CollectionBulkStore store, OperationCallback cb); + + /** + * + * @param get + * @param cb + * @return + */ + BTreeGetBulkOperation bopGetBulk(BTreeGetBulk get, BTreeGetBulkOperation.Callback cb); + + /** + * + * @param key + * @param get + * @param cb + * @return + */ + BTreeGetByPositionOperation bopGetByPosition(String key, BTreeGetByPosition get, OperationCallback cb); + + /** + * + * @param key + * @param get + * @param cb + * @return + */ + BTreeFindPositionOperation bopFindPosition(String key, BTreeFindPosition get, OperationCallback cb); + + /** + * + * @param key + * @param get + * @param cb + * @return + */ + BTreeStoreAndGetOperation bopStoreAndGet(String key, + BTreeStoreAndGet get, byte[] dataToStore, OperationCallback cb); + +} diff --git a/src/main/java/net/spy/memcached/OperationTimeoutException.java b/src/main/java/net/spy/memcached/OperationTimeoutException.java new file mode 100755 index 000000000..b12a0fac7 --- /dev/null +++ b/src/main/java/net/spy/memcached/OperationTimeoutException.java @@ -0,0 +1,37 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +/** + * Thrown by {@link MemcachedClient} when any internal operations timeout. + * + * @author Ray Krueger + * @see net.spy.memcached.ConnectionFactory#getOperationTimeout() + */ +public class OperationTimeoutException extends RuntimeException { + + private static final long serialVersionUID = 1479557202445843619L; + + public OperationTimeoutException(String message) { + super(message); + } + + public OperationTimeoutException(String message, Throwable cause) { + super(message, cause); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/PartitionedList.java b/src/main/java/net/spy/memcached/PartitionedList.java new file mode 100644 index 000000000..5f39a43f3 --- /dev/null +++ b/src/main/java/net/spy/memcached/PartitionedList.java @@ -0,0 +1,57 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.AbstractList; +import java.util.List; + +public class PartitionedList extends AbstractList> { + + private final List list; + private final int size; + + public PartitionedList(List list, int size) { + this.list = list; + this.size = size; + } + + @Override + public List get(int index) { + int listSize = size(); + if (listSize < 0) + throw new IllegalArgumentException("negative size: " + listSize); + if (index < 0) + throw new IndexOutOfBoundsException("index " + index + + " must not be negative"); + if (index >= listSize) + throw new IndexOutOfBoundsException("index " + index + + " must be less than size " + listSize); + int start = index * size; + int end = Math.min(start + size, list.size()); + return list.subList(start, end); + } + + @Override + public int size() { + return (list.size() + size - 1) / size; + } + + @Override + public boolean isEmpty() { + return list.isEmpty(); + } +} diff --git a/src/main/java/net/spy/memcached/PartitionedMap.java b/src/main/java/net/spy/memcached/PartitionedMap.java new file mode 100644 index 000000000..397f33b0d --- /dev/null +++ b/src/main/java/net/spy/memcached/PartitionedMap.java @@ -0,0 +1,83 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class PartitionedMap extends AbstractList> { + + private final List> mapList; + + public PartitionedMap(Map map, int size) { + int expectSize = (map.size() + size - 1) / size; + int splitBy = size; + int parsedSize = 0; + + int mapSize = map.size(); + int counter = 0; + int listIndex = 0; + + mapList = new ArrayList>(expectSize); + for (int i = 0; i < expectSize; i++) { + mapList.add(new HashMap()); + } + + for (Map.Entry entry : map.entrySet()) { + parsedSize++; + counter++; + + mapList.get(listIndex).put(entry.getKey(), entry.getValue()); + + if (parsedSize == splitBy || counter == mapSize) { + parsedSize = 0; + listIndex++; + } + } + } + + @Override + public Map get(int index) { + int listSize = size(); + + if (listSize < 0) + throw new IllegalArgumentException("negative size: " + listSize); + + if (index < 0) + throw new IndexOutOfBoundsException("index " + index + + " must not be negative"); + + if (index >= listSize) + throw new IndexOutOfBoundsException("index " + index + + " must be less than size " + listSize); + + return mapList.get(index); + } + + @Override + public int size() { + return mapList.size(); + } + + @Override + public boolean isEmpty() { + return mapList.isEmpty(); + } +} diff --git a/src/main/java/net/spy/memcached/StatisticsHandler.java b/src/main/java/net/spy/memcached/StatisticsHandler.java new file mode 100644 index 000000000..853511f85 --- /dev/null +++ b/src/main/java/net/spy/memcached/StatisticsHandler.java @@ -0,0 +1,198 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; + +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.DynamicMBean; +import javax.management.InvalidAttributeValueException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.ReflectionException; + +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.protocol.TCPMemcachedNodeImpl; + +/** + * Arcus java client statistics mbean + */ +public class StatisticsHandler extends SpyObject implements DynamicMBean { + + private static final String ADDED_Q = "addedQ"; + private static final String INPUT_Q = "inputQ"; + private static final String WRITE_Q = "writeQ"; + private static final String READ_Q = "readQ"; + private static final String RECONN_CNT = "reconnectCount"; + private static final String CONT_TIMEOUT = "continuousTimeout"; + + private static final String DELIMETER = "-"; + + private final ArcusClient client; + private final Map nodes = new ConcurrentHashMap(); + + public StatisticsHandler(final ArcusClient client) { + this.client = client; + + Collection allNodes = ((MemcachedClient) client) + .getAllNodes(); + + for (MemcachedNode node : allNodes) { + nodes.put(node.getSocketAddress().toString(), node); + } + } + + 
@Override + public Object getAttribute(String attribute) + throws AttributeNotFoundException, MBeanException, + ReflectionException { + + if (attribute.contains(ADDED_Q)) { + return ((MemcachedClient) client).getAddedQueueSize(); + } + + TCPMemcachedNodeImpl node = (TCPMemcachedNodeImpl) getNode(attribute); + + if (node == null) { + return null; + } + + if (attribute.contains(RECONN_CNT)) { + return node.getReconnectCount(); + } + + if (attribute.contains(CONT_TIMEOUT)) { + return node.getContinuousTimeout(); + } + + if (attribute.contains(INPUT_Q)) { + return node.getInputQueueSize(); + } + + if (attribute.contains(READ_Q)) { + return node.getReadQueueSize(); + } + + if (attribute.contains(WRITE_Q)) { + return node.getWriteQueueSize(); + } + + throw new AttributeNotFoundException("Atrribute '" + attribute + + "' is not defined."); + } + + private MemcachedNode getNode(String attribute) { + try { + if (attribute.contains(DELIMETER)) { + MemcachedNode memcachedNode = nodes.get(attribute + .split(DELIMETER)[1]); + if (memcachedNode instanceof TCPMemcachedNodeImpl) { + return memcachedNode; + } else { + return null; + } + } else { + return null; + } + } catch (Exception e) { + return null; + } + } + + @Override + public AttributeList getAttributes(String[] attributes) { + AttributeList list = new AttributeList(); + + for (String attribute : attributes) { + try { + list.add(new Attribute(attribute, getAttribute(attribute))); + } catch (Exception e) { + + } + } + + return list; + } + + @Override + public MBeanInfo getMBeanInfo() { + List attributes = new ArrayList(); + + // global input queue + attributes.add(new MBeanAttributeInfo(ADDED_Q, "long", + "added queue size", true, false, false)); + + // statistics information on each connection + for (Entry entry : nodes.entrySet()) { + // reconnect count + attributes.add(new MBeanAttributeInfo(RECONN_CNT + DELIMETER + + entry.getValue().getSocketAddress().toString(), "int", + "reconnect count", true, false, false)); + + // 
continuous timeout count + attributes.add(new MBeanAttributeInfo(CONT_TIMEOUT + DELIMETER + + entry.getValue().getSocketAddress().toString(), "int", + "continuous timeout count", true, false, false)); + + // read queue + attributes.add(new MBeanAttributeInfo(INPUT_Q + DELIMETER + + entry.getValue().getSocketAddress().toString(), "int", + "input queue count", true, false, false)); + + // read queue + attributes.add(new MBeanAttributeInfo(READ_Q + DELIMETER + + entry.getValue().getSocketAddress().toString(), "int", + "read queue count", true, false, false)); + + // write queue + attributes.add(new MBeanAttributeInfo(WRITE_Q + DELIMETER + + entry.getValue().getSocketAddress().toString(), "int", + "write queue count", true, false, false)); + } + + getLogger().info("retrieve client statistics mbean informations."); + + return new MBeanInfo(this.getClass().getName(), + "Arcus client statistics MBean", + attributes.toArray(new MBeanAttributeInfo[0]), null, null, null); + } + + @Override + public Object invoke(String actionName, Object[] params, String[] signature) + throws MBeanException, ReflectionException { + return null; + } + + @Override + public void setAttribute(Attribute attribute) + throws AttributeNotFoundException, InvalidAttributeValueException, + MBeanException, ReflectionException { + } + + @Override + public AttributeList setAttributes(AttributeList attributes) { + return null; + } +} diff --git a/src/main/java/net/spy/memcached/auth/AuthDescriptor.java b/src/main/java/net/spy/memcached/auth/AuthDescriptor.java new file mode 100644 index 000000000..bebe7b410 --- /dev/null +++ b/src/main/java/net/spy/memcached/auth/AuthDescriptor.java @@ -0,0 +1,59 @@ +package net.spy.memcached.auth; + +import javax.security.auth.callback.CallbackHandler; + +/** + * Information required to specify authentication mechanisms and callbacks. 
+ */ +public class AuthDescriptor { + + public final String[] mechs; + public final CallbackHandler cbh; + private int authAttempts; + private int allowedAuthAttempts; + + /** + * Request authentication using the given list of mechanisms and callback + * handler. + * + * @param m list of mechanisms + * @param h the callback handler for grabbing credentials and stuff + */ + public AuthDescriptor(String[] m, CallbackHandler h) { + mechs=m; + cbh=h; + authAttempts = 0; + String authThreshhold=System.getProperty( + "net.spy.memcached.auth.AuthThreshold"); + if (authThreshhold != null) { + allowedAuthAttempts = Integer.parseInt(authThreshhold); + } else { + allowedAuthAttempts = -1; // auth forever + } + } + + /** + * Get a typical auth descriptor for CRAM-MD5 or PLAIN auth with the given + * username and password. + * + * @param u the username + * @param p the password + * + * @return an AuthDescriptor + */ + public static AuthDescriptor typical(String u, String p) { + return new AuthDescriptor(new String[]{"CRAM-MD5", "PLAIN"}, + new PlainCallbackHandler(u, p)); + } + + public boolean authThresholdReached() { + if (allowedAuthAttempts < 0) { + return false; // negative value means auth forever + } else if (authAttempts >= allowedAuthAttempts) { + return true; + } else { + authAttempts++; + return false; + } + } +} diff --git a/src/main/java/net/spy/memcached/auth/AuthThread.java b/src/main/java/net/spy/memcached/auth/AuthThread.java new file mode 100644 index 000000000..a2260c99a --- /dev/null +++ b/src/main/java/net/spy/memcached/auth/AuthThread.java @@ -0,0 +1,104 @@ +package net.spy.memcached.auth; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.MemcachedNode; +import net.spy.memcached.OperationFactory; +import net.spy.memcached.compat.SpyThread; +import 
net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; + +public class AuthThread extends SpyThread { + + private final MemcachedConnection conn; + private final AuthDescriptor authDescriptor; + private final OperationFactory opFact; + private final MemcachedNode node; + + public AuthThread(MemcachedConnection c, OperationFactory o, + AuthDescriptor a, MemcachedNode n) { + conn = c; + opFact = o; + authDescriptor = a; + node = n; + start(); + } + + @Override + public void run() { + OperationStatus priorStatus = null; + final AtomicBoolean done = new AtomicBoolean(); + + while(!done.get()) { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference foundStatus = + new AtomicReference(); + + final OperationCallback cb=new OperationCallback() { + public void receivedStatus(OperationStatus val) { + // If the status we found was null, we're done. + if(val.getMessage().length() == 0) { + done.set(true); + node.authComplete(); + getLogger().info("Authenticated to " + + node.getSocketAddress()); + } else { + foundStatus.set(val); + } + } + + public void complete() { + latch.countDown(); + } + }; + + // Get the prior status to create the correct operation. + final Operation op = buildOperation(priorStatus, cb); + + conn.insertOperation(node, op); + + try { + latch.await(); + Thread.sleep(100); + } catch(InterruptedException e) { + // we can be interrupted if we were in the + // process of auth'ing and the connection is + // lost or dropped due to bad auth + Thread.currentThread().interrupt(); + if (op != null) { + op.cancel(); + } + done.set(true); // If we were interrupted, tear down. + } + + // Get the new status to inspect it. 
+ priorStatus = foundStatus.get(); + if(priorStatus != null) { + if(!priorStatus.isSuccess()) { + getLogger().warn("Authentication failed to " + + node.getSocketAddress()); + } + } + } + return; + } + + private Operation buildOperation(OperationStatus st, OperationCallback cb) { + if(st == null) { + return opFact.saslAuth(authDescriptor.mechs, + node.getSocketAddress().toString(), null, + authDescriptor.cbh, cb); + } else { + return opFact.saslStep(authDescriptor.mechs, + KeyUtil.getKeyBytes(st.getMessage()), + node.getSocketAddress().toString(), null, + authDescriptor.cbh, cb); + } + + } +} diff --git a/src/main/java/net/spy/memcached/auth/AuthThreadMonitor.java b/src/main/java/net/spy/memcached/auth/AuthThreadMonitor.java new file mode 100644 index 000000000..3e7e4fd26 --- /dev/null +++ b/src/main/java/net/spy/memcached/auth/AuthThreadMonitor.java @@ -0,0 +1,56 @@ +package net.spy.memcached.auth; + +import java.util.HashMap; +import java.util.Map; +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.MemcachedNode; +import net.spy.memcached.OperationFactory; +import net.spy.memcached.compat.SpyObject; + +/** + * This will ensure no more than one AuthThread will exist for a given + * MemcachedNode. + */ +public class AuthThreadMonitor extends SpyObject { + + private Map nodeMap; + + public AuthThreadMonitor() { + nodeMap = new HashMap(); + } + + /** + * + * Authenticate a new connection. This is typically used by a + * MemcachedNode in order to authenticate a connection right after it + * has been established. + * + * If an old, but not yet completed authentication exists this will + * stop it in order to create a new authentication attempt. 
+ * + * @param conn + * @param opFact + * @param authDescriptor + * @param node + */ + public synchronized void authConnection(MemcachedConnection conn, + OperationFactory opFact, AuthDescriptor authDescriptor, MemcachedNode node) { + interruptOldAuth(node); + AuthThread newSASLAuthenticator = new AuthThread(conn, opFact, + authDescriptor, node); + nodeMap.put(node, newSASLAuthenticator); + } + + private void interruptOldAuth(MemcachedNode nodeToStop) { + AuthThread toStop = nodeMap.get(nodeToStop); + if (toStop != null) { + if (toStop.isAlive()) { + getLogger().warn("Incomplete authentication interrupted for node " + + nodeToStop); + toStop.interrupt(); + } + + nodeMap.remove(nodeToStop); + } + } +} diff --git a/src/main/java/net/spy/memcached/auth/PlainCallbackHandler.java b/src/main/java/net/spy/memcached/auth/PlainCallbackHandler.java new file mode 100644 index 000000000..52097bf2c --- /dev/null +++ b/src/main/java/net/spy/memcached/auth/PlainCallbackHandler.java @@ -0,0 +1,48 @@ +package net.spy.memcached.auth; + +import java.io.IOException; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.CallbackHandler; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.auth.callback.TextOutputCallback; +import javax.security.auth.callback.UnsupportedCallbackException; + +/** + * Callback handler for doing plain auth. + */ +public class PlainCallbackHandler implements CallbackHandler { + + private final String username; + private final char[] password; + + /** + * Construct a plain callback handler with the given username and password. 
/**
 * Callback handler for doing plain auth.
 */
public class PlainCallbackHandler implements CallbackHandler {

    private final String username;
    private final char[] password;

    /**
     * Construct a plain callback handler with the given username and password.
     *
     * @param u the username
     * @param p the password
     */
    public PlainCallbackHandler(String u, String p) {
        this.username = u;
        this.password = p.toCharArray();
    }

    /**
     * Answers name and password callbacks with the configured credentials.
     * TextOutputCallback is accepted but ignored; any other callback type
     * is rejected.
     */
    public void handle(Callback[] callbacks) throws IOException,
            UnsupportedCallbackException {
        for (Callback cb : callbacks) {
            if (cb instanceof NameCallback) {
                ((NameCallback) cb).setName(username);
            } else if (cb instanceof PasswordCallback) {
                ((PasswordCallback) cb).setPassword(password);
            } else if (!(cb instanceof TextOutputCallback)) {
                throw new UnsupportedCallbackException(cb);
            }
            // TextOutputCallback: not implementing this one yet...
        }
    }

}

Auth Utilities.

+

+ Extra utilities for authentication management. +

+ + + diff --git a/src/main/java/net/spy/memcached/collection/Attributes.java b/src/main/java/net/spy/memcached/collection/Attributes.java new file mode 100644 index 000000000..0501d913f --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/Attributes.java @@ -0,0 +1,100 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.compat.SpyObject; + +public class Attributes extends SpyObject { + public static final Integer DEFAULT_FLAGS = 0; + public static final Integer DEFAULT_EXPIRETIME = 0; + + protected Integer flags; + protected Integer expireTime; + protected CollectionType type; + + private String str; + + public Attributes() { + } + + public Attributes(Integer expireTime) { + this.expireTime = expireTime; + } + + protected String stringify() { + StringBuilder b = new StringBuilder(); + + if (flags != null) + b.append(" flags=").append(flags); + + if (expireTime != null) + b.append(" expiretime=").append(expireTime); + + if (type != null) + b.append(" type=").append(type.getStringValue()); + + str = (b.length() < 1) ? "" : b.substring(1); + + return str; + } + + @Override + public String toString() { + return (str == null) ? stringify() : str; + } + + public int getLength() { + return (str == null) ? 
stringify().length() : str.length(); + } + + public void setAttribute(String attribute) { + String[] splited = attribute.split("="); + assert splited.length == 2 : "An attribute should be given in \"name=value\" format."; + + String name = splited[0]; + String value = splited[1]; + + try { + if ("flags".equals(name)) { + flags = Integer.parseInt(value); + } else if ("expiretime".equals(name)) { + expireTime = Integer.parseInt(value); + } else if ("type".equals(name)) { + type = CollectionType.find(value); + } + } catch (Exception e) { + getLogger().info(e, e); + assert false : "Unexpected value."; + } + } + + public void setExpireTime(Integer expireTime) { + this.expireTime = expireTime; + } + + public Integer getFlags() { + return flags; + } + + public Integer getExpireTime() { + return expireTime; + } + + public CollectionType getType() { + return type; + } +} diff --git a/src/main/java/net/spy/memcached/collection/BKeyObject.java b/src/main/java/net/spy/memcached/collection/BKeyObject.java new file mode 100644 index 000000000..6c078cbd8 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BKeyObject.java @@ -0,0 +1,108 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +public class BKeyObject { + + public enum BKeyType { + LONG, BYTEARRAY, UNKNOWN + } + + private BKeyType type = BKeyType.UNKNOWN; + private Long longBKey; + private ByteArrayBKey byteArrayBKey; + + public BKeyObject() { + + } + + public BKeyObject(long longBKey) { + setLongBKey(longBKey); + } + + public BKeyObject(byte[] byteArrayBKey) { + setByteArrayBKey(new ByteArrayBKey(byteArrayBKey)); + } + + public BKeyObject(ByteArrayBKey byteArrayBKey) { + setByteArrayBKey(byteArrayBKey); + } + + public BKeyObject(String bkeyString) { + byte[] b = BTreeUtil.hexStringToByteArrays(bkeyString); + ByteArrayBKey byteArrayBKey = new ByteArrayBKey(b); + setByteArrayBKey(byteArrayBKey); + } + + public BKeyType getType() { + return type; + } + + public Long getLongBKey() { + if (BKeyType.LONG == type) { + return longBKey; + } else { + return null; + } + } + + public void setLongBKey(long longBKey) { + this.type = BKeyType.LONG; + this.longBKey = longBKey; + this.byteArrayBKey = null; + } + + public ByteArrayBKey getByteArrayBKey() { + if (BKeyType.BYTEARRAY == type) { + return byteArrayBKey; + } else { + return null; + } + } + + public byte[] getByteArrayBKeyRaw() { + if (BKeyType.BYTEARRAY == type) { + return byteArrayBKey.getBytes(); + } else { + return null; + } + } + + public void setByteArrayBKey(ByteArrayBKey byteArrayBKey) { + this.type = BKeyType.BYTEARRAY; + this.byteArrayBKey = byteArrayBKey; + this.longBKey = null; + } + + public String getBKeyAsString() { + if (BKeyType.LONG == type) { + return String.valueOf(longBKey); + } else if (BKeyType.BYTEARRAY == type) { + return BTreeUtil.toHex(byteArrayBKey.getBytes()); + } else { + return null; + } + } + + @Override + public String toString() { + return getBKeyAsString(); + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeCount.java b/src/main/java/net/spy/memcached/collection/BTreeCount.java new file mode 100644 
index 000000000..47d1c195c --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeCount.java @@ -0,0 +1,68 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +public class BTreeCount extends CollectionCount { + + private static final String command = "bop count"; + + protected final String range; + + protected final ElementFlagFilter elementFlagFilter; + + public BTreeCount(long from, long to, ElementFlagFilter elementFlagFilter) { + this.range = String.valueOf(from) + ".." + String.valueOf(to); + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeCount(byte[] from, byte[] to, ElementFlagFilter elementFlagFilter) { + this.range = BTreeUtil.toHex(from) + ".." + BTreeUtil.toHex(to); + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeCount(long from, long to, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this.range = String.valueOf(from) + ".." + String.valueOf(to); + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public BTreeCount(byte[] from, byte[] to, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this.range = BTreeUtil.toHex(from) + ".." 
+ BTreeUtil.toHex(to); + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public String stringify() { + if (str != null) + return str; + + StringBuilder b = new StringBuilder(); + + b.append(range); + + if (elementFlagFilter != null) { + b.append(" ").append(elementFlagFilter.toString()); + } + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/collection/BTreeCreate.java b/src/main/java/net/spy/memcached/collection/BTreeCreate.java new file mode 100644 index 000000000..e845316a9 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeCreate.java @@ -0,0 +1,34 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class BTreeCreate extends CollectionCreate { + + private static final String command = "bop create"; + + public BTreeCreate() { + super(); + } + + public BTreeCreate(int flags, Integer expTime, Long maxCount, CollectionOverflowAction overflowAction, Boolean readable, boolean noreply) { + super(flags, expTime, maxCount, overflowAction, readable, noreply); + } + + public String getCommand() { + return command; + } +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeDelete.java b/src/main/java/net/spy/memcached/collection/BTreeDelete.java new file mode 100644 index 000000000..140fe0474 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeDelete.java @@ -0,0 +1,134 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +public class BTreeDelete extends CollectionDelete { + + private static final String command = "bop delete"; + protected int count = -1; + + protected ElementFlagFilter elementFlagFilter; + + public BTreeDelete(long bkey, boolean noreply) { + this.range = String.valueOf(bkey); + this.noreply = noreply; + } + + public BTreeDelete(long bkey, boolean noreply, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) { + this(bkey, noreply); + this.dropIfEmpty = dropIfEmpty; + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeDelete(long from, long to, boolean noreply) { + this.range = String.valueOf(from) + ".." + String.valueOf(to); + this.noreply = noreply; + } + + public BTreeDelete(long from, long to, int count, boolean noreply) { + this.range = String.valueOf(from) + ".." + String.valueOf(to); + this.count = count; + this.noreply = noreply; + } + + public BTreeDelete(long from, long to, int count, boolean noreply, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) { + this(from, to, count, noreply); + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeDelete(byte[] bkey, boolean noreply, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) { + this.range = BTreeUtil.toHex(bkey); + this.noreply = noreply; + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeDelete(byte[] from, byte[] to, int count, boolean noreply, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) { + this.range = BTreeUtil.toHex(from) + ".." 
+ BTreeUtil.toHex(to); + this.count = count; + this.noreply = noreply; + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = elementFlagFilter; + } + + public BTreeDelete(long bkey, boolean noreply, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this(bkey, noreply); + this.dropIfEmpty = dropIfEmpty; + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public BTreeDelete(long from, long to, int count, boolean noreply, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this(from, to, count, noreply); + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public BTreeDelete(byte[] bkey, boolean noreply, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this.range = BTreeUtil.toHex(bkey); + this.noreply = noreply; + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public BTreeDelete(byte[] from, byte[] to, int count, boolean noreply, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) { + this.range = BTreeUtil.toHex(from) + ".." 
+ BTreeUtil.toHex(to); + this.count = count; + this.noreply = noreply; + this.dropIfEmpty = dropIfEmpty; + this.noreply = noreply; + this.elementFlagFilter = (ElementFlagFilter)elementMultiFlagsFilter; + } + + public String stringify() { + if (str != null) return str; + + StringBuilder b = new StringBuilder(); + b.append(range); + + if (elementFlagFilter != null) { + b.append(" ").append(elementFlagFilter.toString()); + } + + if (count >= 0) { + b.append(" ").append(count); + } + + if (dropIfEmpty) { + b.append(" drop"); + } + + if (noreply) { + b.append(" noreply"); + } + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeElement.java b/src/main/java/net/spy/memcached/collection/BTreeElement.java new file mode 100644 index 000000000..9c795a4f3 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeElement.java @@ -0,0 +1,41 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * A single b+tree element: a bkey, an optional element flag (eflag), and a
 * value.
 *
 * @param <K> bkey type
 * @param <V> value type
 */
public class BTreeElement<K, V> {
    private final K bkey;
    private final V value;
    // may be null when the element carries no flag
    private final byte[] eflag;

    public BTreeElement(K bkey, byte[] eflag, V value) {
        this.bkey = bkey;
        this.eflag = eflag;
        this.value = value;
    }

    public K getBkey() {
        return bkey;
    }

    public V getValue() {
        return value;
    }

    public byte[] getEflag() {
        return eflag;
    }
}
+ */ +package net.spy.memcached.collection; + +/** + * Ascii protocol implementation for "bop position" (B+Tree find position) + * + * bop position \r\n = 0 or positive integer + * END\r\n (CLIENT_ERROR, NOT_FOUND, UNREADABLE, BKEY_MISMATCH, TYPE_MISMATCH, + * NOT_FOUND_ELEMENT) + */ +public class BTreeFindPosition { + + private static final String command = "bop position"; + + private final BKeyObject bkeyObject; + private final BTreeOrder order; + private String str; + + public BTreeFindPosition(long longBKey, BTreeOrder order) { + this.bkeyObject = new BKeyObject(longBKey); + this.order = order; + } + + public BTreeFindPosition(byte[] byteArrayBKey, BTreeOrder order) { + this.bkeyObject = new BKeyObject(byteArrayBKey); + this.order = order; + } + + public String stringify() { + if (str != null) return str; + StringBuilder b = new StringBuilder(); + b.append(bkeyObject.getBKeyAsString()); + b.append(" "); + b.append(order.getAscii()); + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + + public BKeyObject getBkeyObject() { + return bkeyObject; + } + + public BTreeOrder getOrder() { + return order; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeGet.java b/src/main/java/net/spy/memcached/collection/BTreeGet.java new file mode 100644 index 000000000..13dba714a --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeGet.java @@ -0,0 +1,157 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
/**
 * Ascii protocol implementation for "bop get" (b+tree element retrieval).
 *
 * Builds the command arguments
 *   <bkey or range> [<eflag filter>] [<offset>] [<count>] [delete|drop]
 * and decodes each element header line of the response.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw CollectionGet, raw Map) -- confirm against
 * the original file.
 */
public class BTreeGet extends CollectionGet {

    private static final String command = "bop get";

    // Single bkey ("81") or inclusive range ("81..90") as sent on the wire.
    protected String range;
    // -1 means "not specified"; stringify() only emits values > 0.
    protected int offset = -1;
    protected int count = -1;
    protected Map map;

    // Optional server-side element flag filter; appended when non-null.
    protected ElementFlagFilter elementFlagFilter;

    /** Get (and optionally delete) the element at a single bkey. */
    public BTreeGet(long bkey, boolean delete) {
        this.headerCount = 2;
        this.range = String.valueOf(bkey);
        this.delete = delete;
    }

    /** Single-bkey get with eflag filtering and optional drop-if-empty. */
    public BTreeGet(long bkey, boolean delete, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) {
        this(bkey, delete);
        this.dropIfEmpty = dropIfEmpty;
        this.elementFlagFilter = elementFlagFilter;
    }

    /** Range get from {@code from} to {@code to} with paging. */
    public BTreeGet(long from, long to, int offset, int count, boolean delete) {
        this.headerCount = 2;
        this.range = String.valueOf(from) + ".." + String.valueOf(to);
        this.offset = offset;
        this.count = count;
        this.delete = delete;
    }

    /** Range get with eflag filtering and optional drop-if-empty. */
    public BTreeGet(long from, long to, int offset, int count, boolean delete, boolean dropIfEmpty, ElementFlagFilter elementFlagFilter) {
        this(from, to, offset, count, delete);
        this.dropIfEmpty = dropIfEmpty;
        this.elementFlagFilter = elementFlagFilter;
    }

    /** Single-bkey get with a multi-flags filter (an ElementFlagFilter subtype). */
    public BTreeGet(long bkey, boolean delete, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) {
        this(bkey, delete);
        this.dropIfEmpty = dropIfEmpty;
        this.elementFlagFilter = (ElementFlagFilter) elementMultiFlagsFilter;
    }

    /** Range get with a multi-flags filter (an ElementFlagFilter subtype). */
    public BTreeGet(long from, long to, int offset, int count, boolean delete, boolean dropIfEmpty, ElementMultiFlagsFilter elementMultiFlagsFilter) {
        this(from, to, offset, count, delete);
        this.dropIfEmpty = dropIfEmpty;
        this.elementFlagFilter = (ElementFlagFilter) elementMultiFlagsFilter;
    }

    public ElementFlagFilter getElementFlagFilter() {
        return elementFlagFilter;
    }

    public String getRange() {
        return range;
    }

    public void setRange(String range) {
        this.range = range;
    }

    public int getCount() {
        return count;
    }

    public void setCount(int count) {
        this.count = count;
    }

    public Map getMap() {
        return map;
    }

    /** Builds and caches the space-separated argument string. */
    public String stringify() {
        if (str != null) return str;

        StringBuilder b = new StringBuilder();
        b.append(range);

        // Optional arguments, in protocol order.
        if (elementFlagFilter != null) b.append(" ").append(elementFlagFilter.toString());
        if (offset > 0) b.append(" ").append(offset);
        if (count > 0) b.append(" ").append(count);
        // "drop" removes the whole b+tree when the get empties it;
        // "delete" removes only the fetched elements.
        if (delete && dropIfEmpty) b.append(" drop");
        if (delete && !dropIfEmpty) b.append(" delete");

        str = b.toString();
        return str;
    }

    public String getCommand() {
        return command;
    }

    /** Lets the response decoder adjust the expected header token count. */
    public void resetHeaderCount(int count) {
        this.headerCount = count;
    }

    // Parser state: 1 = expecting "<bkey> <eflag|bytes>",
    // 2 = expecting the remainder of a header whose eflag was seen in step 1.
    private int headerParseStep = 1;

    // True once any element header carried an eflag ("0x...") token.
    private boolean elementFlagExists = false;

    /**
     * True when the current element's header has been fully consumed.
     * When eflags are present a header spans two decode steps.
     */
    public boolean eachRecordParseCompleted() {
        if (elementFlagExists) {
            return headerParseStep == 1;
        } else {
            return true;
        }
    }

    @Override
    public boolean headerReady(int spaceCount) {
        // 2 tokens without an eflag, 3 with one.
        return spaceCount == 2 || spaceCount == 3;
    }

    /**
     * Decodes one element header line: "<bkey> [<eflag>] <bytes>".
     * Stateful: see headerParseStep above.
     */
    public void decodeItemHeader(String itemHeader) {
        String[] splited = itemHeader.split(" ");

        if (headerParseStep == 1) {
            // found element flag.
            if (splited[1].startsWith("0x")) {
                this.elementFlagExists = true;
                this.subkey = Long.parseLong(splited[0]);
                this.elementFlag = BTreeUtil.hexStringToByteArrays(splited[1].substring(2));
//                this.headerCount++;
                headerParseStep = 2;
            } else {
                this.subkey = Long.parseLong(splited[0]);
                this.dataLength = Integer.parseInt(splited[1]);
            }
        } else {
            // Second step: this line carries the data length.
            this.headerParseStep = 1;
            this.dataLength = Integer.parseInt(splited[1]);
        }
    }
}
/**
 * Ascii protocol model for "bop mget" (bulk b+tree get over multiple keys).
 *
 * Implementations encode the request line and decode the per-key and
 * per-element response headers.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw List) -- confirm against the original file.
 */
public interface BTreeGetBulk {

    /** All target keys joined with "," as sent in the request body. */
    public String getCommaSeparatedKeys();

    /** A representative key (e.g. used to pick the destination node -- TODO confirm). */
    public String getRepresentKey();

    /** The keys this bulk get targets. */
    public List getKeyList();

    /** The space-separated command arguments. */
    public String stringify();

    /** The ascii command name ("bop mget"). */
    public String getCommand();

    /** True when a line with this many tokens is a complete element header. */
    public boolean elementHeaderReady(int spaceCount);

    /** True when a line with this many tokens is a complete key header. */
    public boolean keyHeaderReady(int spaceCount);

    /** Key parsed from the most recent key header. */
    public String getKey();

    /** Item flags parsed from the most recent key header. */
    public int getFlag();

    /** Bkey of the most recently decoded element (Long or byte[]). */
    public Object getSubkey();

    /** Data byte count of the most recently decoded element. */
    public int getDataLength();

    /** Element flag of the most recent element, or null if absent. */
    public byte[] getEFlag();

    /** Parses one element header line. */
    public void decodeItemHeader(String itemHeader);

    /** Parses one key header line. */
    public void decodeKeyHeader(String keyHeader);
}
/**
 * Base ascii protocol implementation for "bop mget".
 *
 * Builds the argument string
 *   <len of comma-separated keys> <key count> <bkey range>
 *   [<eflag filter>] [<offset>] <count>
 * and holds the per-line decoder output fields; bkey-type-specific
 * decoding lives in the subclasses.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw List, raw Map) -- confirm against the
 * original file.
 */
public abstract class BTreeGetBulkImpl implements BTreeGetBulk {

    private static final String command = "bop mget";

    // Cached "key1,key2,..." form of keyList.
    private String commaSeparatedKeys;

    // Cached stringify() result.
    protected String str;
    protected int lenKeys;

    protected List keyList;
    // Bkey range, e.g. "1..10" or "0x00..0xFF".
    protected String range;
    protected ElementFlagFilter eFlagFilter;
    // -1 means "not specified"; stringify() only emits offsets > 0.
    protected int offset = -1;
    protected int count;
    protected boolean reverse;

    protected Map map;

    // Decoder output fields, overwritten on each decoded header line.
    public String key;
    public int flag;
    public Object subkey;
    public int dataLength;
    public byte[] eflag = null;

    /** Byte-array bkey range; reverse when from > to in lexicographic order. */
    protected BTreeGetBulkImpl(List keyList, byte[] from, byte[] to,
            ElementFlagFilter eFlagFilter, int offset, int count) {

        this.keyList = keyList;
        this.range = BTreeUtil.toHex(from) + ".." + BTreeUtil.toHex(to);
        this.eFlagFilter = eFlagFilter;
        this.offset = offset;
        this.count = count;
        this.reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0;
    }

    /** Long bkey range; a negative "to" produces a single-bkey request. */
    protected BTreeGetBulkImpl(List keyList, long from, long to,
            ElementFlagFilter eFlagFilter, int offset, int count) {

        this.keyList = keyList;
        this.range = String.valueOf(from) + ((to > -1) ? ".." + String.valueOf(to) : "");
        this.eFlagFilter = eFlagFilter;
        this.offset = offset;
        this.count = count;
        this.reverse = (from > to);
    }

    /** All target keys joined with ",", built once and cached. */
    public String getCommaSeparatedKeys() {
        if (commaSeparatedKeys != null) {
            return commaSeparatedKeys;
        }

        StringBuilder sb = new StringBuilder();
        int numkeys = keyList.size();
        for (int i = 0; i < numkeys; i++) {
            sb.append(keyList.get(i));
            if ((i + 1) < numkeys) {
                sb.append(",");
            }
        }
        commaSeparatedKeys = sb.toString();
        return commaSeparatedKeys;
    }

    /** First key of the list; throws if the list is missing or empty. */
    public String getRepresentKey() {
        if (keyList == null || keyList.isEmpty()) {
            throw new IllegalStateException("Key list is empty.");
        }
        return keyList.get(0);
    }

    public List getKeyList() {
        return keyList;
    }

    /**
     * Builds and caches the argument string. The comma-separated key list
     * itself is sent separately in the request body.
     */
    public String stringify() {
        if (str != null)
            return str;

        StringBuilder b = new StringBuilder();

        b.append(getCommaSeparatedKeys().length());
        b.append(" ").append(keyList.size());
        b.append(" ").append(range);

        if (eFlagFilter != null)
            b.append(" ").append(eFlagFilter.toString());

        if (offset > 0)
            b.append(" ").append(offset);

        b.append(" ").append(count);

        str = b.toString();
        return str;
    }

    public String getCommand() {
        return command;
    }

    public boolean elementHeaderReady(int spaceCount) {
        // 3 tokens without an eflag, 4 with one (see subclasses' decoders).
        return spaceCount == 3 || spaceCount == 4;
    }

    public boolean keyHeaderReady(int spaceCount) {
        // 3- or 5-token key header forms (see subclasses' decodeKeyHeader).
        return spaceCount == 3 || spaceCount == 5;
    }

    public String getKey() {
        return key;
    }

    public int getFlag() {
        return flag;
    }

    public int getDataLength() {
        return dataLength;
    }

    public byte[] getEFlag() {
        return eflag;
    }
}
b/src/main/java/net/spy/memcached/collection/BTreeGetBulkWithByteTypeBkey.java @@ -0,0 +1,58 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import java.util.List; + +import net.spy.memcached.util.BTreeUtil; + +public class BTreeGetBulkWithByteTypeBkey extends BTreeGetBulkImpl { + + public BTreeGetBulkWithByteTypeBkey(List keyList, byte[] from, + byte[] to, ElementFlagFilter eFlagFilter, int offset, int count) { + super(keyList, from, to, eFlagFilter, offset, count); + } + + public byte[] getSubkey() { + return (byte[]) subkey; + } + + public void decodeItemHeader(String itemHeader) { + String[] splited = itemHeader.split(" "); + + if (splited.length == 3) { + // ELEMENT + this.subkey = BTreeUtil.hexStringToByteArrays(splited[1].substring(2)); + this.dataLength = Integer.parseInt(splited[2]); + this.eflag = null; + } else if (splited.length == 4) { + // ELEMENT + this.subkey = BTreeUtil.hexStringToByteArrays(splited[1].substring(2)); + this.eflag = BTreeUtil.hexStringToByteArrays(splited[2].substring(2)); + this.dataLength = Integer.parseInt(splited[3]); + } + } + + @Override + public void decodeKeyHeader(String keyHeader) { + String[] splited = keyHeader.split(" "); + this.key = splited[1]; + if (splited.length == 5) { + this.flag = Integer.valueOf(splited[3]); + } + } +} \ No newline at end of file diff --git 
a/src/main/java/net/spy/memcached/collection/BTreeGetBulkWithLongTypeBkey.java b/src/main/java/net/spy/memcached/collection/BTreeGetBulkWithLongTypeBkey.java new file mode 100644 index 000000000..eed4dcc09 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeGetBulkWithLongTypeBkey.java @@ -0,0 +1,55 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import java.util.List; + +import net.spy.memcached.util.BTreeUtil; + +public class BTreeGetBulkWithLongTypeBkey extends BTreeGetBulkImpl { + + public BTreeGetBulkWithLongTypeBkey(List keyList, long from, long to, + ElementFlagFilter eFlagFilter, int offset, int count) { + super(keyList, from, to, eFlagFilter, offset, count); + } + + public Long getSubkey() { + return (Long) subkey; + } + + public void decodeItemHeader(String itemHeader) { + String[] splited = itemHeader.split(" "); + + if (splited.length == 3) { + // ELEMENT + this.subkey = Long.parseLong(splited[1]); + this.dataLength = Integer.parseInt(splited[2]); + this.eflag = null; + } else if (splited.length == 4) { + // ELEMENT + this.subkey = Long.parseLong(splited[1]); + this.eflag = BTreeUtil.hexStringToByteArrays(splited[2].substring(2)); + this.dataLength = Integer.parseInt(splited[3]); + } + } + + @Override + public void decodeKeyHeader(String keyHeader) { + String[] splited = keyHeader.split(" "); + 
this.key = splited[1]; + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/collection/BTreeGetByPosition.java b/src/main/java/net/spy/memcached/collection/BTreeGetByPosition.java new file mode 100644 index 000000000..675934269 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeGetByPosition.java @@ -0,0 +1,150 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +/** + * Ascii protocol implementation for "bop gbp" (B+Tree get by position) + * + * bop gbp \r\n + * VALUE \r\n + * [] \r\n + * END\r\n (CLIENT_ERROR, NOT_FOUND, UNREADABLE, TYPE_MISMATCH, NOT_FOUND_ELEMENT) + */ +public class BTreeGetByPosition extends CollectionGet { + + public static final int HEADER_EFLAG_POSITION = 1; // 0-based + + private static final String command = "bop gbp"; + + private final BTreeOrder order; + private final String range; + private final int posFrom; + private final int posTo; + + private BKeyObject bkey; + private byte[] eflag; + private int bytes; + + public BTreeGetByPosition(BTreeOrder order, int pos) { + this.headerCount = 2; + this.order = order; + this.range = String.valueOf(pos); + this.posFrom = pos; + this.posTo = pos; + } + + public BTreeGetByPosition(BTreeOrder order, int posFrom, int posTo) { + this.headerCount = 2; + this.order = order; + this.range = String.valueOf(posFrom) + ".." 
+ String.valueOf(posTo); + this.posFrom = posFrom; + this.posTo = posTo; + } + + public BTreeOrder getOrder() { + return order; + } + + public String getRange() { + return range; + } + + public String stringify() { + if (str != null) return str; + StringBuilder b = new StringBuilder(); + b.append(order.getAscii()); + b.append(" "); + b.append(range); + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + + @Override + public boolean headerReady(int spaceCount) { + return spaceCount == 2; + } + + private static final int BKEY = 0; + private static final int EFLAG_OR_BYTES = 1; + private static final int BYTES = 2; + + /* + * VALUE \r\n + * [] \r\n + * END\r\n + */ + public void decodeItemHeader(String itemHeader) { + String[] splited = itemHeader.split(" "); + boolean hasEFlag = false; + + // + if (splited[BKEY].startsWith("0x")) { + this.bkey = new BKeyObject(splited[0].substring(2)); + } else { + this.bkey = new BKeyObject(Integer.parseInt(splited[0])); + } + + // or + if (splited[EFLAG_OR_BYTES].startsWith("0x")) { + // + hasEFlag = true; + this.eflag = BTreeUtil + .hexStringToByteArrays(splited[EFLAG_OR_BYTES].substring(2)); + } else { + this.bytes = Integer.parseInt(splited[EFLAG_OR_BYTES]); + } + + // + if (hasEFlag) { + this.bytes = Integer.parseInt(splited[BYTES]); + } + + this.dataLength = bytes; + } + + public BKeyObject getBkey() { + return bkey; + } + + public byte[] getEflag() { + return eflag; + } + + public int getBytes() { + return bytes; + } + + public int getPosFrom() { + return posFrom; + } + + public int getPosTo() { + return posTo; + } + + public boolean isReversed() { + return posFrom > posTo; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeGetResult.java b/src/main/java/net/spy/memcached/collection/BTreeGetResult.java new file mode 100644 index 000000000..f3abcc192 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeGetResult.java @@ -0,0 +1,46 @@ +/* + * 
/**
 * Holds the result of a b+tree get operation: the fetched elements sorted
 * by bkey, plus the operation status.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source; the maps are kept raw here -- confirm against
 * the original file.
 */
public class BTreeGetResult {

    private final SortedMap elements;
    private final CollectionOperationStatus opStatus;

    public BTreeGetResult(SortedMap elements,
            CollectionOperationStatus opStatus) {
        this.elements = elements;
        this.opStatus = opStatus;
    }

    // NOTE(review): exposes the internal mutable map; callers can modify it.
    public Map getElements() {
        return elements;
    }

    public CollectionOperationStatus getCollectionResponse() {
        return opStatus;
    }

    /** Adds one decoded element, keyed by its bkey. */
    public void addElement(BTreeElement element) {
        this.elements.put(element.getBkey(), element);
    }
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.ops.Mutator; + +public class BTreeMutate extends CollectionMutate { + + private final String command; + + protected final int by; + + public BTreeMutate(Mutator m, int by) { + if (Mutator.incr == m) { + command = "bop incr"; + } else { + command = "bop decr"; + } + + this.by = by; + } + + public String stringify() { + if (str != null) + return str; + + StringBuilder b = new StringBuilder(); + b.append(by); + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/collection/BTreeOrder.java b/src/main/java/net/spy/memcached/collection/BTreeOrder.java new file mode 100644 index 000000000..6725db4ad --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeOrder.java @@ -0,0 +1,37 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Traversal order component used by the "bop position" and "bop gbp"
 * b+tree operations.
 */
public enum BTreeOrder {

    /** Ascending bkey order. */
    ASC("asc"),

    /** Descending bkey order. */
    DESC("desc");

    // The literal token sent in the ascii protocol.
    private final String ascii;

    BTreeOrder(String ascii) {
        this.ascii = ascii;
    }

    /** Returns the ascii protocol token for this order. */
    public String getAscii() {
        return ascii;
    }
}
/**
 * Ascii protocol model for "bop smget" (sorted-merge get across multiple
 * b+tree keys).
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw List) -- confirm against the original file.
 */
public interface BTreeSMGet {

    // NOTE(review): constant-in-interface; implementations compare the
    // response header token count against this in headerReady().
    public int headerCount = 4;

    /** All target keys joined with "," as sent in the request body. */
    public String getCommaSeparatedKeys();

    /** A representative key (e.g. used to pick the destination node -- TODO confirm). */
    public String getRepresentKey();

    /** The keys this smget targets. */
    public List getKeyList();

    /** The space-separated command arguments. */
    public String stringify();

    /** The ascii command name ("bop smget"). */
    public String getCommand();

    /** True when a line with this many tokens is a complete header. */
    public boolean headerReady(int spaceCount);

    /** Key parsed from the most recent header. */
    public String getKey();

    /** Item flags parsed from the most recent header. */
    public int getFlag();

    /** Bkey of the most recent element (Long or byte[]). */
    public Object getSubkey();

    /** Data byte count of the most recent element. */
    public int getDataLength();

    /** True when the bkey range runs high-to-low. */
    public boolean isReverse();

    /** True when the most recent element carried an element flag. */
    public boolean hasEflag();

    /** Parses one response header line. */
    public void decodeItemHeader(String itemHeader);
}
/**
 * "bop smget" implementation for byte-array (hex) type bkeys.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw List, raw Map) -- confirm against the
 * original file.
 */
public class BTreeSMGetWithByteTypeBkey implements BTreeSMGet {

    private static final String command = "bop smget";

    // Cached stringify() result.
    protected String str;

    protected List keyList;
    // Cached "key1,key2,..." form of keyList.
    private String commaSeparatedKeys;

    protected int lenKeys;

    // Hex bkey range, e.g. "0x00..0xFF".
    protected String range;
    // -1 means "not specified"; stringify() only emits offsets > 0.
    protected int offset = -1;
    protected int count;
    protected Map map;

    // True when the range runs high-to-low in lexicographic byte order.
    protected boolean reverse;

    // Decoder output fields, overwritten on each decoded header line.
    public String key;
    public int flag;
    public byte[] subkey;
    public int dataLength;

    public byte[] eflag = null;

    private ElementFlagFilter eFlagFilter;

    public BTreeSMGetWithByteTypeBkey(List keyList, byte[] from,
            byte[] to, ElementFlagFilter eFlagFilter, int offset, int count) {
        this.keyList = keyList;
        this.range = BTreeUtil.toHex(from) + ".." + BTreeUtil.toHex(to);
        this.eFlagFilter = eFlagFilter;
        this.offset = offset;
        this.count = count;
        this.reverse = BTreeUtil.compareByteArraysInLexOrder(from, to) > 0;
    }

    /** All target keys joined with ",", built once and cached. */
    public String getCommaSeparatedKeys() {
        if (commaSeparatedKeys != null) {
            return commaSeparatedKeys;
        }

        StringBuilder sb = new StringBuilder();
        int numkeys = keyList.size();
        for (int i = 0; i < numkeys; i++) {
            sb.append(keyList.get(i));
            if ((i + 1) < numkeys) {
                sb.append(",");
            }
        }
        commaSeparatedKeys = sb.toString();
        return commaSeparatedKeys;
    }

    /** First key of the list; throws if the list is missing or empty. */
    public String getRepresentKey() {
        if (keyList == null || keyList.isEmpty()) {
            throw new IllegalStateException("Key list is empty.");
        }
        return keyList.get(0);
    }

    public List getKeyList() {
        return keyList;
    }

    /**
     * Builds and caches the argument string:
     *   <len of keys> <key count> <bkey range> [<eflag filter>] [<offset>] <count>
     */
    public String stringify() {
        if (str != null)
            return str;

        StringBuilder b = new StringBuilder();

        b.append(getCommaSeparatedKeys().length());
        b.append(" ").append(keyList.size());
        b.append(" ").append(range);

        if (eFlagFilter != null)
            b.append(" ").append(eFlagFilter.toString());

        if (offset > 0)
            b.append(" ").append(offset);

        b.append(" ").append(count);

        str = b.toString();
        return str;
    }

    public String getCommand() {
        return command;
    }

    public boolean headerReady(int spaceCount) {
        return headerCount == spaceCount;
    }

    public String getKey() {
        return key;
    }

    public int getFlag() {
        return flag;
    }

    public byte[] getSubkey() {
        return subkey;
    }

    public int getDataLength() {
        return dataLength;
    }

    public boolean isReverse() {
        return reverse;
    }

    public boolean hasEflag() {
        return eflag != null;
    }

    /**
     * Decodes one element header line: "<key> <flags> <bkey> [<eflag>] <bytes>".
     */
    public void decodeItemHeader(String itemHeader) {
        String[] splited = itemHeader.split(" ");

        /*
        with flag
        VALUE 1
        SMGetTest31 0 0x01 0x45464C4147 6 VALUE1
        MISSED_KEYS 0
        END

        without flag
        VALUE 1
        SMGetTest31 0 0x01 6 VALUE1
        MISSED_KEYS 0
        END
        */
        this.key = splited[0];
        this.flag = Integer.parseInt(splited[1]);
        // Bkey arrives hex-encoded with a "0x" prefix.
        this.subkey = BTreeUtil.hexStringToByteArrays(splited[2].substring(2));

        // The fourth token is either the eflag ("0x...") or the byte count.
        if (splited[3].startsWith("0x")) {
            this.eflag = BTreeUtil.hexStringToByteArrays(splited[3].substring(2));
            this.dataLength = Integer.parseInt(splited[4]);
        } else {
            this.eflag = null;
            this.dataLength = Integer.parseInt(splited[3]);
        }
    }
}
/**
 * "bop smget" implementation for long-type bkeys.
 *
 * NOTE(review): generic type parameters appear to have been stripped from
 * this copy of the source (raw List, raw Map) -- confirm against the
 * original file.
 */
public class BTreeSMGetWithLongTypeBkey implements BTreeSMGet {

    private static final String command = "bop smget";

    // Cached stringify() result.
    protected String str;

    protected List keyList;
    // Cached "key1,key2,..." form of keyList.
    private String commaSeparatedKeys;

    protected int lenKeys;

    // Bkey range, e.g. "1..10"; a single bkey when "to" is negative.
    protected String range;
    // -1 means "not specified"; stringify() only emits offsets > 0.
    protected int offset = -1;
    protected int count;
    protected Map map;

    // True when the range runs high-to-low.
    protected boolean reverse;

    // Decoder output fields, overwritten on each decoded header line.
    public String key;
    public int flag;
    public long subkey;
    public int dataLength;

    public byte[] eflag = null;

    private ElementFlagFilter eFlagFilter;

    public BTreeSMGetWithLongTypeBkey(List keyList, long from, long to,
            ElementFlagFilter eFlagFilter, int offset, int count) {
        this.keyList = keyList;

        // A negative "to" produces a single-bkey request.
        this.range = String.valueOf(from)
                + ((to > -1) ? ".." + String.valueOf(to) : "");

        this.eFlagFilter = eFlagFilter;
        this.offset = offset;
        this.count = count;
        this.reverse = (from > to);
    }

    /** All target keys joined with ",", built once and cached. */
    public String getCommaSeparatedKeys() {
        if (commaSeparatedKeys != null) {
            return commaSeparatedKeys;
        }

        StringBuilder sb = new StringBuilder();
        int numkeys = keyList.size();
        for (int i = 0; i < numkeys; i++) {
            sb.append(keyList.get(i));
            if ((i + 1) < numkeys) {
                sb.append(",");
            }
        }
        commaSeparatedKeys = sb.toString();
        return commaSeparatedKeys;
    }

    /** First key of the list; throws if the list is missing or empty. */
    public String getRepresentKey() {
        if (keyList == null || keyList.isEmpty()) {
            throw new IllegalStateException("Key list is empty.");
        }
        return keyList.get(0);
    }

    public List getKeyList() {
        return keyList;
    }

    /**
     * Builds and caches the argument string:
     *   <len of keys> <key count> <bkey range> [<eflag filter>] [<offset>] <count>
     */
    public String stringify() {
        if (str != null)
            return str;

        StringBuilder b = new StringBuilder();

        b.append(getCommaSeparatedKeys().length());
        b.append(" ").append(keyList.size());
        b.append(" ").append(range);

        if (eFlagFilter != null)
            b.append(" ").append(eFlagFilter.toString());

        if (offset > 0)
            b.append(" ").append(offset);

        b.append(" ").append(count);

        str = b.toString();
        return str;
    }

    public String getCommand() {
        return command;
    }

    public boolean headerReady(int spaceCount) {
        return headerCount == spaceCount;
    }

    public String getKey() {
        return key;
    }

    public int getFlag() {
        return flag;
    }

    public Long getSubkey() {
        return subkey;
    }

    public int getDataLength() {
        return dataLength;
    }

    public boolean isReverse() {
        return reverse;
    }

    public boolean hasEflag() {
        return eflag != null;
    }

    /**
     * Decodes one element header line: "<key> <flags> <bkey> [<eflag>] <bytes>".
     */
    public void decodeItemHeader(String itemHeader) {
        String[] splited = itemHeader.split(" ");

        /*
        with flag
        VALUE 1
        SMGetTest31 0 1 0x45464C4147 6 VALUE1
        MISSED_KEYS 0
        END

        without flag
        VALUE 1
        SMGetTest31 0 1 6 VALUE1
        MISSED_KEYS 0
        END
        */
        this.key = splited[0];
        this.flag = Integer.parseInt(splited[1]);
        this.subkey = Long.parseLong(splited[2]);

        // The fourth token is either the eflag ("0x...") or the byte count.
        if (splited[3].startsWith("0x")) {
            this.eflag = BTreeUtil.hexStringToByteArrays(splited[3].substring(2));
            this.dataLength = Integer.parseInt(splited[4]);
        } else {
            this.eflag = null;
            this.dataLength = Integer.parseInt(splited[3]);
        }
    }
}
+ */ +package net.spy.memcached.collection; + +public class BTreeStore extends CollectionStore { + + private static final String command = "bop insert"; + + public BTreeStore() { + super(); + } + + public BTreeStore(T value, byte[] eFlag, boolean createKeyIfNotExists, RequestMode requestMode, CollectionAttributes attr) { + super(value, eFlag, createKeyIfNotExists, requestMode, attr); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeStoreAndGet.java b/src/main/java/net/spy/memcached/collection/BTreeStoreAndGet.java new file mode 100644 index 000000000..417cab74c --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeStoreAndGet.java @@ -0,0 +1,122 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +/** + * Ascii protocol implementation for store and get(trimmed) operations + * - bop insert [] [create ] getrim\r\n\r\n + * - bop upsert [] [create ] getrim\r\n\r\n + * VALUE \r\n + * [] \r\n + * TRIMMED\r\n + * @param + */ +public class BTreeStoreAndGet extends BTreeStore { + + // FIXME please refactor this :-( subclass-ing needed + public enum Command { + INSERT("bop insert"), + UPSERT("bop upsert"); + + private final String command; + + Command(String command) { + this.command = command; + } + + public String getCommand() { + return command; + } + } + + public static final int HEADER_EFLAG_POSITION = 1; // 0-based + + private Command cmd; + private BKeyObject bkeyObject; + private int bytes; + + public BTreeStoreAndGet(Command cmd, long bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate) { + super(value, eFlag, attributesForCreate != null, RequestMode.GET_TRIM, + attributesForCreate); + this.cmd = cmd; + this.bkeyObject = new BKeyObject(bkey); + } + + public BTreeStoreAndGet(Command cmd, byte[] bkey, byte[] eFlag, T value, + CollectionAttributes attributesForCreate) { + super(value, eFlag, attributesForCreate != null, RequestMode.GET_TRIM, + attributesForCreate); + this.cmd = cmd; + this.bkeyObject = new BKeyObject(bkey); + } + + public BKeyObject getBkeyObject() { + return bkeyObject; + } + + public boolean headerReady(int spaceCount) { + return spaceCount == 2; + } + + private static final int BKEY = 0; + private static final int EFLAG_OR_BYTES = 1; + private static final int BYTES = 2; + + public void decodeItemHeader(String itemHeader) { + String[] splited = itemHeader.split(" "); + boolean hasEFlag = false; + + // + if (splited[BKEY].startsWith("0x")) { + this.bkeyObject = new BKeyObject(splited[0].substring(2)); + } else { + this.bkeyObject = new BKeyObject(Integer.parseInt(splited[0])); + } + + // or + if 
(splited[EFLAG_OR_BYTES].startsWith("0x")) { + // + hasEFlag = true; + this.elementFlag = BTreeUtil + .hexStringToByteArrays(splited[EFLAG_OR_BYTES].substring(2)); + } else { + this.bytes = Integer.parseInt(splited[EFLAG_OR_BYTES]); + } + + // + if (hasEFlag) { + this.bytes = Integer.parseInt(splited[BYTES]); + } + } + + public int getBytes() { + return bytes; + } + + public Command getCmd() { + return cmd; + } + + @Override + public String getCommand() { + return cmd.getCommand(); + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeUpdate.java b/src/main/java/net/spy/memcached/collection/BTreeUpdate.java new file mode 100644 index 000000000..db49f516e --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeUpdate.java @@ -0,0 +1,31 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class BTreeUpdate extends CollectionUpdate { + + private static final String command = "bop update"; + + public BTreeUpdate(T newValue, ElementFlagUpdate elementFlagUpdate, boolean noreply) { + super(newValue, elementFlagUpdate, noreply); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/BTreeUpsert.java b/src/main/java/net/spy/memcached/collection/BTreeUpsert.java new file mode 100644 index 000000000..31ba3d550 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/BTreeUpsert.java @@ -0,0 +1,36 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class BTreeUpsert extends CollectionStore { + + private static final String command = "bop upsert"; + + public BTreeUpsert() { + super(); + } + + public BTreeUpsert(T value, byte[] eFlag, boolean createKeyIfNotExists, + RequestMode requestMode, CollectionAttributes attr) { + super(value, eFlag, createKeyIfNotExists, requestMode, attr); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/ByteArrayBKey.java b/src/main/java/net/spy/memcached/collection/ByteArrayBKey.java new file mode 100644 index 000000000..ca0d4591c --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ByteArrayBKey.java @@ -0,0 +1,54 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.util.Arrays; + +import net.spy.memcached.util.BTreeUtil; + +public class ByteArrayBKey implements Comparable { + + public static final byte[] MIN = new byte[] { (byte) 0 }; + + public static final byte[] MAX = new byte[] { (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255, (byte) 255, + (byte) 255, (byte) 255, (byte) 255, (byte) 255 }; + + private final byte[] bkey; + + public ByteArrayBKey(byte[] bkey) { + this.bkey = bkey; + } + + public byte[] getBytes() { + return bkey; + } + + @Override + public int compareTo(ByteArrayBKey o) { + return BTreeUtil.compareByteArraysInLexOrder(bkey, o.getBytes()); + } + + @Override + public String toString() { + return Arrays.toString(bkey); + } +} diff --git a/src/main/java/net/spy/memcached/collection/ByteArrayTreeMap.java b/src/main/java/net/spy/memcached/collection/ByteArrayTreeMap.java new file mode 100644 index 000000000..d5fcc5042 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ByteArrayTreeMap.java @@ -0,0 +1,38 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.util.Comparator; +import java.util.TreeMap; + +public final class ByteArrayTreeMap extends TreeMap { + + private static final long serialVersionUID = -304580135331634224L; + + public ByteArrayTreeMap(Comparator comparator) { + super(comparator); + } + + @Override + public V get(Object key) { + if (key instanceof byte[]) { + return super.get(new ByteArrayBKey((byte[]) key)); + } else { + return super.get(key); + } + } +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionAttributes.java b/src/main/java/net/spy/memcached/collection/CollectionAttributes.java new file mode 100644 index 000000000..1c8e74f3a --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionAttributes.java @@ -0,0 +1,216 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +public class CollectionAttributes extends Attributes { + + public static final Long DEFAULT_MAXCOUNT = 4000L; + public static final CollectionOverflowAction DEFAULT_OVERFLOWACTION = CollectionOverflowAction.tail_trim; + + private Long count; + private Long maxCount; + private CollectionOverflowAction overflowAction; + private Boolean readable; + + private Long maxBkeyRange = null; + private byte[] maxBkeyRangeByBytes = null; + private Long minBkey = null; + private byte[] minBkeyByBytes = null; + private Long maxBkey = null; + private byte[] maxBkeyByBytes = null; + private Long trimmed = null; + + private String str; + + public CollectionAttributes() { } + + public CollectionAttributes(Integer expireTime, + Long maxCount, CollectionOverflowAction overflowAction) { + this.expireTime = expireTime; + this.maxCount = maxCount; + this.overflowAction = overflowAction; + } + + protected String stringify() { + StringBuilder b = new StringBuilder(); + + if (flags != null) + b.append(" flags=").append(flags); + if (expireTime != null) + b.append(" expiretime=").append(expireTime); + if (type != null) + b.append(" type=").append(type.getStringValue()); + if (count != null) + b.append(" count=").append(count); + if (maxCount != null) + b.append(" maxcount=").append(maxCount); + if (overflowAction != null) + b.append(" overflowaction=").append(String.valueOf(overflowAction)); + if (readable != null) + b.append(" readable=").append((readable) ? "on" : "off"); + if (maxBkeyRange != null || maxBkeyRangeByBytes != null) { + if (maxBkeyRange != null) { + b.append(" maxbkeyrange=").append(String.valueOf(maxBkeyRange)); + } else { + b.append(" maxbkeyrange=").append( + BTreeUtil.toHex(maxBkeyRangeByBytes)); + } + } + + str = (b.length() < 1)? "" : b.substring(1); + return str; + } + + @Override + public String toString() { + return (str == null)? 
stringify() : str; + } + + public int getLength() { + return (str == null)? stringify().length() : str.length(); + } + + public void setAttribute(String attribute) { + String[] splited = attribute.split("="); + assert splited.length == 2 : "An attribute should be given in \"name=value\" format."; + + String name = splited[0]; + String value = splited[1]; + + try { + if ("flags".equals(name)) { + flags = Integer.parseInt(value); + } else if ("expiretime".equals(name)) { + expireTime = Integer.parseInt(value); + } else if ("type".equals(name)) { + type + = CollectionType.find(value); + } else if ("count".equals(name)) { + count = Long.parseLong(value); + } else if ("maxcount".equals(name)) { + maxCount = Long.parseLong(value); + } else if ("overflowaction".equals(name)) { + overflowAction = CollectionOverflowAction.valueOf(value); + } else if ("readable".equals(name)) { + readable = "on".equals(value); + } else if ("maxbkeyrange".equals(name)) { + if (value.startsWith("0x")) { + maxBkeyRangeByBytes = BTreeUtil.hexStringToByteArrays(value.substring(2)); + } else { + maxBkeyRange = Long.parseLong(value); + } + } else if ("minbkey".equals(name)) { + if (!value.startsWith("-1")) { + if (value.startsWith("0x")) { + minBkeyByBytes = BTreeUtil.hexStringToByteArrays(value + .substring(2)); + } else { + minBkey = Long.parseLong(value); + } + } + } else if ("maxbkey".equals(name)) { + if (!value.startsWith("-1")) { + if (value.startsWith("0x")) { + maxBkeyByBytes = BTreeUtil.hexStringToByteArrays(value + .substring(2)); + } else { + maxBkey = Long.parseLong(value); + } + } + } else if ("trimmed".equals(name)) { + trimmed = Long.parseLong(value); + } + } catch (Exception e) { + getLogger().info(e, e); + assert false : "Unexpected value."; + } + } + + public void setMaxCount(long maxCount) { + this.str = null; + this.maxCount = maxCount; + } + + public void setOverflowAction(CollectionOverflowAction overflowAction) { + this.str = null; + this.overflowAction = overflowAction; + } 
+ + public void setReadable(Boolean readable) { + this.str = null; + this.readable = readable; + } + + public Long getCount() { + return count; + } + + public Long getMaxCount() { + return maxCount; + } + + public CollectionOverflowAction getOverflowAction() { + return overflowAction; + } + + public Boolean getReadable() { + return readable; + } + + public Long getMaxBkeyRange() { + return maxBkeyRange; + } + + public void setMaxBkeyRange(Long maxBkeyRange) { + this.str = null; + this.maxBkeyRange = maxBkeyRange; + } + + public byte[] getMaxBkeyRangeByBytes() { + return maxBkeyRangeByBytes; + } + + public void setMaxBkeyRangeByBytes(byte[] maxBkeyRangeByBytes) { + this.maxBkeyRangeByBytes = maxBkeyRangeByBytes; + } + + public Long getMinBkey() { + return minBkey; + } + + public void setMinBkey(Long minBkey) { + this.minBkey = minBkey; + } + + public byte[] getMinBkeyByBytes() { + return minBkeyByBytes; + } + + public Long getMaxBkey() { + return maxBkey; + } + + public byte[] getMaxBkeyByBytes() { + return maxBkeyByBytes; + } + + public Long getTrimmed() { + return trimmed; + } +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionBulkStore.java b/src/main/java/net/spy/memcached/collection/CollectionBulkStore.java new file mode 100644 index 000000000..769e4b730 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionBulkStore.java @@ -0,0 +1,280 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.List; + +import net.spy.memcached.CachedData; +import net.spy.memcached.KeyUtil; +import net.spy.memcached.transcoders.Transcoder; +import net.spy.memcached.util.BTreeUtil; + +public abstract class CollectionBulkStore extends CollectionObject { + + public static final String PIPE = "pipe"; + public static final int MAX_PIPED_ITEM_COUNT = 500; + + protected List keyList; + protected T value; + protected CachedData cachedData; + protected boolean createKeyIfNotExists; + protected Transcoder tc; + protected int itemCount; + + protected CollectionAttributes attribute; + + public abstract ByteBuffer getAsciiCommand(); + + public abstract ByteBuffer getBinaryCommand(); + + /** + * + */ + public static class BTreeBulkStore extends CollectionBulkStore { + + private static final String COMMAND = "bop insert"; + + private final String bkey; + private final String eflag; + + public BTreeBulkStore(List keyList, long bkey, byte[] eflag, + T value, CollectionAttributes attr, Transcoder tc) { + this.keyList = keyList; + this.bkey = String.valueOf(bkey); + this.eflag = BTreeUtil.toHex(eflag); + this.value = value; + this.attribute = attr; + this.tc = tc; + this.itemCount = keyList.size(); + this.createKeyIfNotExists = (attr != null); + this.cachedData = tc.encode(value); + } + + public BTreeBulkStore(List keyList, byte[] bkey, + byte[] eflag, T value, CollectionAttributes attr, + Transcoder tc) { + this.keyList = keyList; + this.bkey = BTreeUtil.toHex(bkey); + this.eflag = BTreeUtil.toHex(eflag); + this.value = value; + this.attribute = attr; + this.tc = tc; + this.itemCount = keyList.size(); + this.createKeyIfNotExists = (attr != null); + this.cachedData = tc.encode(value); + } + + public ByteBuffer getAsciiCommand() { + int capacity = 0; + + // estimate 
the buffer capacity + int eachExtraSize = bkey.length() + + ((eflag != null) ? eflag.length() : 0) + + cachedData.getData().length + 64; + for (String eachKey : keyList) { + capacity += KeyUtil.getKeyBytes(eachKey).length; + } + capacity += eachExtraSize * keyList.size(); + + // allocate the buffer + ByteBuffer bb = ByteBuffer.allocate(capacity); + + // create ascii operation string + Iterator iterator = keyList.iterator(); + + while (iterator.hasNext()) { + String key = iterator.next(); + byte[] value = cachedData.getData(); + + setArguments( + bb, + COMMAND, + key, + bkey, + (eflag != null) ? eflag : "", + value.length, + (createKeyIfNotExists) ? "create" : "", + (createKeyIfNotExists) ? cachedData.getFlags() : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getExpireTime() != null) ? attribute + .getExpireTime() + : CollectionAttributes.DEFAULT_EXPIRETIME : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getMaxCount() != null) ? attribute + .getMaxCount() + : CollectionAttributes.DEFAULT_MAXCOUNT : "", + (iterator.hasNext()) ? 
PIPE : ""); + bb.put(value); + bb.put(CRLF); + } + + // flip the buffer + bb.flip(); + + return bb; + } + + public ByteBuffer getBinaryCommand() { + throw new RuntimeException("not supported in binary protocol yet."); + } + } + + public static class SetBulkStore extends CollectionBulkStore { + + private static final String COMMAND = "sop insert"; + + public SetBulkStore(List keyList, T value, + CollectionAttributes attr, Transcoder tc) { + this.keyList = keyList; + this.value = value; + this.attribute = attr; + this.tc = tc; + this.itemCount = keyList.size(); + this.createKeyIfNotExists = (attr != null); + this.cachedData = tc.encode(value); + } + + public ByteBuffer getAsciiCommand() { + int capacity = 0; + + // estimate the buffer capacity + int eachExtraSize = cachedData.getData().length + 64; + for (String eachKey : keyList) { + capacity += KeyUtil.getKeyBytes(eachKey).length; + } + capacity += eachExtraSize * keyList.size(); + + // allocate the buffer + ByteBuffer bb = ByteBuffer.allocate(capacity); + + // create ascii operation string + Iterator iterator = keyList.iterator(); + + while (iterator.hasNext()) { + String key = iterator.next(); + byte[] value = cachedData.getData(); + + setArguments( + bb, + COMMAND, + key, + value.length, + (createKeyIfNotExists) ? "create" : "", + (createKeyIfNotExists) ? cachedData.getFlags() : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getExpireTime() != null) ? attribute + .getExpireTime() + : CollectionAttributes.DEFAULT_EXPIRETIME : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getMaxCount() != null) ? attribute + .getMaxCount() + : CollectionAttributes.DEFAULT_MAXCOUNT : "", + (iterator.hasNext()) ? 
PIPE : ""); + bb.put(value); + bb.put(CRLF); + } + // flip the buffer + bb.flip(); + + return bb; + } + + public ByteBuffer getBinaryCommand() { + throw new RuntimeException("not supported in binary protocol yet."); + } + } + + public static class ListBulkStore extends CollectionBulkStore { + + private static final String COMMAND = "lop insert"; + private int index; + + public ListBulkStore(List keyList, int index, T value, + CollectionAttributes attr, Transcoder tc) { + this.keyList = keyList; + this.index = index; + this.value = value; + this.attribute = attr; + this.tc = tc; + this.itemCount = keyList.size(); + this.createKeyIfNotExists = (attr != null); + this.cachedData = tc.encode(value); + } + + public ByteBuffer getAsciiCommand() { + int capacity = 0; + + // estimate the buffer capacity + int eachExtraSize = String.valueOf(index).length() + + cachedData.getData().length + 64; + for (String eachKey : keyList) { + capacity += KeyUtil.getKeyBytes(eachKey).length; + } + capacity += eachExtraSize * keyList.size(); + + // allocate the buffer + ByteBuffer bb = ByteBuffer.allocate(capacity); + + // create ascii operation string + Iterator iterator = keyList.iterator(); + + while (iterator.hasNext()) { + String key = iterator.next(); + byte[] value = cachedData.getData(); + + setArguments( + bb, + COMMAND, + key, + index, + value.length, + (createKeyIfNotExists) ? "create" : "", + (createKeyIfNotExists) ? cachedData.getFlags() : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getExpireTime() != null) ? attribute + .getExpireTime() + : CollectionAttributes.DEFAULT_EXPIRETIME : "", + (createKeyIfNotExists) ? (attribute != null && attribute + .getMaxCount() != null) ? attribute + .getMaxCount() + : CollectionAttributes.DEFAULT_MAXCOUNT : "", + (iterator.hasNext()) ? 
PIPE : ""); + bb.put(value); + bb.put(CRLF); + } + + // flip the buffer + bb.flip(); + + return bb; + } + + public ByteBuffer getBinaryCommand() { + throw new RuntimeException("not supported in binary protocol yet."); + } + } + + public List getKeyList() { + return this.keyList; + } + + public int getItemCount() { + return this.itemCount; + } +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionCount.java b/src/main/java/net/spy/memcached/collection/CollectionCount.java new file mode 100644 index 000000000..30d9aa79b --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionCount.java @@ -0,0 +1,24 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +public abstract class CollectionCount { + protected String str; + + public abstract String stringify(); + public abstract String getCommand(); +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionCreate.java b/src/main/java/net/spy/memcached/collection/CollectionCreate.java new file mode 100644 index 000000000..1db09825f --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionCreate.java @@ -0,0 +1,71 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +public abstract class CollectionCreate { + protected int flags; + protected int expTime; + protected long maxCount; + protected CollectionOverflowAction overflowAction; + protected Boolean readable; + protected boolean noreply; + + protected String str; + + public CollectionCreate() { } + + public CollectionCreate(int flags, Integer expTime, Long maxCount, CollectionOverflowAction overflowAction, Boolean readable, boolean noreply) { + this.flags = flags; + this.expTime = (null == expTime) ? CollectionAttributes.DEFAULT_EXPIRETIME : expTime; + this.maxCount = (null == maxCount) ? CollectionAttributes.DEFAULT_MAXCOUNT : maxCount; + this.overflowAction = overflowAction; + this.readable = readable; + this.noreply = noreply; + } + + public String stringify() { + if (str != null) return str; + + StringBuilder b = new StringBuilder(); + + b.append(flags); + //TODO:collection insert with creation option + b.append(' ').append(expTime); + b.append(' ').append(maxCount); + + if (null != overflowAction) { + b.append(' ').append(overflowAction); + } + + if (null != readable && !readable) { + b.append(' ').append("unreadable"); + } + + if (noreply) { + b.append((b.length() <= 0)? "" : " ").append("noreply"); + } + + str = b.toString(); + return str; + } + + public String toString() { + return (str != null)? 
str : stringify(); + } + + public abstract String getCommand(); +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionDelete.java b/src/main/java/net/spy/memcached/collection/CollectionDelete.java new file mode 100644 index 000000000..574e53b04 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionDelete.java @@ -0,0 +1,55 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public abstract class CollectionDelete { + + protected String range; + protected boolean noreply; + protected boolean dropIfEmpty = true; + + protected String str; + protected byte[] data = { }; + + public String getRange() { + return range; + } + + public void setRange(String range) { + this.range = range; + } + + public boolean isNoreply() { + return noreply; + } + + public void setNoreply(boolean noreply) { + this.noreply = noreply; + } + + public byte[] getData() { + return data; + } + + public void setData(byte[] data) { + this.data = data; + } + + public abstract String stringify(); + public abstract String getCommand(); + +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionExist.java b/src/main/java/net/spy/memcached/collection/CollectionExist.java new file mode 100644 index 000000000..2790a9771 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionExist.java @@ -0,0 +1,59 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public abstract class CollectionExist { + + protected T value; + protected byte[] data; + + protected String str; + + public CollectionExist() { } + + public CollectionExist(T value, byte[] data) { + this.value = value; + this.data = data; + } + + public void setValue(T value) { + this.value = value; + } + + public T getValue() { + return value; + } + + public byte[] getData() { + return data; + } + + public void setData(byte[] data) { + this.data = data; + } + + public String stringify() { + return ""; + } + + public String toString() { + return (str != null)? str : stringify(); + } + + public abstract String getCommand(); + +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionGet.java b/src/main/java/net/spy/memcached/collection/CollectionGet.java new file mode 100644 index 000000000..ad6a7ffae --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionGet.java @@ -0,0 +1,71 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public abstract class CollectionGet { + + protected boolean delete = false; + protected boolean dropIfEmpty = true; + + protected String str; + protected int headerCount; + protected long subkey; + protected int dataLength; + + protected byte[] elementFlag; + + public boolean isDelete() { + return delete; + } + + public void setDelete(boolean delete) { + this.delete = delete; + } + + public long getSubkey() { + return subkey; + } + + public int getDataLength() { + return dataLength; + } + + public byte[] getElementFlag() { + return elementFlag; + } + + public boolean headerReady(int spaceCount) { + return headerCount == spaceCount; + } + + public void setHeaderCount(int headerCount) { + this.headerCount = headerCount; + } + + public int getHeaderCount() { + return headerCount; + } + + public boolean eachRecordParseCompleted() { + return true; + } + + public abstract String stringify(); + public abstract String getCommand(); + public abstract void decodeItemHeader(String itemHeader); + +} diff --git a/src/main/java/net/spy/memcached/collection/CollectionMutate.java b/src/main/java/net/spy/memcached/collection/CollectionMutate.java new file mode 100644 index 000000000..597cc3ac3 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/CollectionMutate.java @@ -0,0 +1,24 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package net.spy.memcached.collection;

import java.nio.ByteBuffer;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.compat.SpyObject;

/**
 * Base class of the argument builders for collection "mutate"
 * (arithmetic) operations. Concrete subclasses render the ASCII-protocol
 * argument string via {@link #stringify()} and name the protocol command
 * via {@link #getCommand()}.
 */
public abstract class CollectionMutate {

	// Cache for the stringified argument form.
	// NOTE(review): caching is not synchronized; assumes single-threaded
	// construction/use of each operation object — confirm against callers.
	protected String str;

	/** Returns the ASCII-protocol argument string for this mutation. */
	public abstract String stringify();

	/** Returns the ASCII-protocol command name. */
	public abstract String getCommand();
}

/**
 * Common helper for objects that serialize collection operations into an
 * ASCII-protocol request buffer.
 */
public class CollectionObject extends SpyObject {

	// Line terminator required after every ASCII protocol line.
	protected static final byte[] CRLF={'\r', '\n'};

	/**
	 * Set some arguments for an operation into the given byte buffer.
	 *
	 * Arguments are space-separated; an argument whose string form is empty
	 * contributes neither bytes nor a separator (this is how optional
	 * arguments such as "create" or "pipe" are omitted). A CRLF terminator
	 * is always appended.
	 */
	protected final void setArguments(ByteBuffer bb, Object... args) {
		boolean wasFirst=true;
		for(Object o : args) {
			String s = String.valueOf(o);
			if(wasFirst) {
				wasFirst=false;
			} else if (!"".equals(s)) {
				// separator only before non-empty, non-first arguments
				bb.put((byte)' ');
			}
			bb.put(KeyUtil.getKeyBytes(s));
		}
		bb.put(CRLF);
	}

}

/**
 * Specifies what to do when the number of elements already reached the maximum
 * allowable element count in a collection upon insertion.
 */
public enum CollectionOverflowAction {
	/**
	 * If set, the collection storing operation would be failed when the number
	 * of elements already reached the maximum count.
	 */
	error,

	/**
	 * (List only.)
	 * If set, the new value would be inserted after the deletion of the head
	 * value when the number of elements already reached the maximum count.
	 */
	head_trim,

	/**
	 * (List only.)
	 * If set, the new value would be inserted after the deletion of the tail
	 * value when the number of elements already reached the maximum count.
	 */
	tail_trim,

	/**
	 * (B+tree only.)
	 * If set, the new entry would be inserted after the deletion of the
	 * smallest bkey entry when the number of elements already reached the
	 * maximum count.
	 */
	smallest_trim,

	/**
	 * (B+tree only.)
	 * If set, the new entry would be inserted after the deletion of the largest
	 * bkey entry when the number of elements already reached the maximum count.
	 */
	largest_trim,

	/**
	 * (B+tree only.)
	 * If set, the new entry would be inserted after the deletion of the
	 * smallest bkey entry when the number of elements already reached the
	 * maximum count.
	 * But, it does not return a TRIMMED message, even if a btree item was trimmed.
	 */
	smallest_silent_trim,

	/**
	 * (B+tree only.)
	 * If set, the new entry would be inserted after the deletion of the largest
	 * bkey entry when the number of elements already reached the maximum count.
	 * But, it does not return a TRIMMED message, even if a btree item was trimmed.
	 */
	largest_silent_trim,

	/**
	 * Placeholder for an overflow action string the client does not recognize.
	 */
	unknown
}
package net.spy.memcached.collection;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import net.spy.memcached.CachedData;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.transcoders.Transcoder;

/**
 * Builds pipelined (bulk) ASCII store commands for collection items.
 * Each concrete subclass renders one "&lt;cmd&gt; ... pipe\r\n&lt;data&gt;\r\n"
 * block per element, with "pipe" omitted on the final element.
 */
public abstract class CollectionPipedStore<T> extends CollectionObject {

	// Marker appended to every command line except the last one of a pipe.
	public static final String PIPE = "pipe";
	// Server-imposed ceiling on the number of piped operations per request.
	public static final int MAX_PIPED_ITEM_COUNT = 500;

	protected String key;
	protected boolean createKeyIfNotExists;
	protected Transcoder<T> tc;
	protected int itemCount;

	protected CollectionAttributes attribute;

	/** Renders the complete pipelined ASCII request, flipped and ready to write. */
	public abstract ByteBuffer getAsciiCommand();
	/** Binary protocol rendering (not implemented yet). */
	public abstract ByteBuffer getBinaryCommand();

	/**
	 * Pipelined "lop insert" (list store).
	 */
	public static class ListPipedStore<T> extends CollectionPipedStore<T> {

		private static final String COMMAND = "lop insert";
		private Collection<T> list;
		private int index;

		public ListPipedStore(String key, int index, Collection<T> list,
				boolean createKeyIfNotExists, CollectionAttributes attr, Transcoder<T> tc) {
			this.key = key;
			this.index = index;
			this.list = list;
			this.createKeyIfNotExists = createKeyIfNotExists;
			this.attribute = attr;
			this.tc = tc;
			this.itemCount = list.size();
		}

		public ByteBuffer getAsciiCommand() {
			int capacity = 0;

			// encode values (the transcoder serializes each element to bytes)
			Collection<byte[]> encodedList = new ArrayList<byte[]>(list.size());
			CachedData cd = null;
			for (T each : list) {
				cd = tc.encode(each);
				encodedList.add(cd.getData());
			}

			// estimate the buffer capacity (64 bytes of slack per line for
			// command, numbers, flags and separators)
			for (byte[] each : encodedList) {
				capacity += KeyUtil.getKeyBytes(key).length;
				capacity += each.length;
				capacity += 64;
			}

			// allocate the buffer
			ByteBuffer bb = ByteBuffer.allocate(capacity);

			// create ascii operation string
			// NOTE(review): cd here is whatever the LAST encode produced, so
			// cd.getFlags() applies the last element's flags to every line —
			// assumed identical for a single Transcoder<T>; confirm.
			Iterator<byte[]> iterator = encodedList.iterator();
			while (iterator.hasNext()) {
				byte[] each = iterator.next();
				setArguments(bb, COMMAND, key, index, each.length,
						(createKeyIfNotExists) ? "create" : "", (createKeyIfNotExists) ? cd.getFlags() : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getExpireTime() != null) ? attribute.getExpireTime() : CollectionAttributes.DEFAULT_EXPIRETIME : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getMaxCount() != null) ? attribute.getMaxCount() : CollectionAttributes.DEFAULT_MAXCOUNT : "",
						(iterator.hasNext()) ? PIPE : "");
				bb.put(each);
				bb.put(CRLF);
			}

			// flip the buffer
			bb.flip();

			return bb;
		}

		public ByteBuffer getBinaryCommand() {
			throw new RuntimeException("not supported in binary protocol yet.");
		}
	}

	/**
	 * Pipelined "sop insert" (set store).
	 */
	public static class SetPipedStore<T> extends CollectionPipedStore<T> {

		private static final String COMMAND = "sop insert";
		private Collection<T> set;

		public SetPipedStore(String key, Collection<T> set, boolean createKeyIfNotExists,
				CollectionAttributes attr, Transcoder<T> tc) {
			this.key = key;
			this.set = set;
			this.createKeyIfNotExists = createKeyIfNotExists;
			this.attribute = attr;
			this.tc = tc;
			this.itemCount = set.size();
		}

		public ByteBuffer getAsciiCommand() {
			int capacity = 0;

			// encode values
			Collection<byte[]> encodedList = new ArrayList<byte[]>(set.size());
			CachedData cd = null;
			for (T each : set) {
				cd = tc.encode(each);
				encodedList.add(cd.getData());
			}

			// estimate the buffer capacity
			for (byte[] each : encodedList) {
				capacity += KeyUtil.getKeyBytes(key).length;
				capacity += each.length;
				capacity += 64;
			}

			// allocate the buffer
			ByteBuffer bb = ByteBuffer.allocate(capacity);

			// create ascii operation string
			// NOTE(review): same caveat as ListPipedStore — cd.getFlags() comes
			// from the last encoded element.
			Iterator<byte[]> iterator = encodedList.iterator();
			while (iterator.hasNext()) {
				byte[] each = iterator.next();

				setArguments(bb, COMMAND, key, each.length,
						(createKeyIfNotExists) ? "create" : "", (createKeyIfNotExists) ? cd.getFlags() : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getExpireTime() != null) ? attribute.getExpireTime() : CollectionAttributes.DEFAULT_EXPIRETIME : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getMaxCount() != null) ? attribute.getMaxCount() : CollectionAttributes.DEFAULT_MAXCOUNT : "",
						(iterator.hasNext()) ? PIPE : "");
				bb.put(each);
				bb.put(CRLF);
			}
			// flip the buffer
			bb.flip();

			return bb;
		}

		public ByteBuffer getBinaryCommand() {
			throw new RuntimeException("not supported in binary protocol yet.");
		}
	}

	/**
	 * Pipelined "bop insert" with long bkeys (b+tree store).
	 */
	public static class BTreePipedStore<T> extends CollectionPipedStore<T> {

		private static final String COMMAND = "bop insert";
		private Map<Long, T> map;

		public BTreePipedStore(String key, Map<Long, T> map, boolean createKeyIfNotExists,
				CollectionAttributes attr, Transcoder<T> tc) {
			this.key = key;
			this.map = map;
			this.createKeyIfNotExists = createKeyIfNotExists;
			this.attribute = attr;
			this.tc = tc;
			this.itemCount = map.size();
		}

		public ByteBuffer getAsciiCommand() {
			int capacity = 0;

			// encode values; positions in decodedList match map.values() order
			List<byte[]> decodedList = new ArrayList<byte[]>(map.size());
			CachedData cd = null;
			for (T each : map.values()) {
				cd = tc.encode(each);
				decodedList.add(cd.getData());
			}

			// estimate the buffer capacity
			// NOTE(review): relies on map.keySet() and map.values() iterating
			// in the same order (true for the JDK Map implementations).
			int i = 0;
			for (Long eachBkey : map.keySet()) {
				capacity += KeyUtil.getKeyBytes(key).length;
				capacity += KeyUtil.getKeyBytes(String.valueOf(eachBkey)).length;
				capacity += decodedList.get(i++).length;
				capacity += 64;
			}

			// allocate the buffer
			ByteBuffer bb = ByteBuffer.allocate(capacity);

			// create ascii operation string
			i = 0;
			Iterator<Long> iterator = map.keySet().iterator();
			while (iterator.hasNext()) {
				Long bkey = iterator.next();
				byte[] value = decodedList.get(i++);

				setArguments(bb, COMMAND, key, bkey, value.length,
						(createKeyIfNotExists) ? "create" : "", (createKeyIfNotExists) ? cd.getFlags() : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getExpireTime() != null) ? attribute.getExpireTime() : CollectionAttributes.DEFAULT_EXPIRETIME : "",
						(createKeyIfNotExists) ? (attribute != null && attribute.getMaxCount() != null) ? attribute.getMaxCount() : CollectionAttributes.DEFAULT_MAXCOUNT : "",
						(iterator.hasNext()) ? PIPE : "");
				bb.put(value);
				bb.put(CRLF);
			}

			// flip the buffer
			bb.flip();

			return bb;
		}

		public ByteBuffer getBinaryCommand() {
			throw new RuntimeException("not supported in binary protocol yet.");
		}
	}

	/**
	 * Pipelined "bop insert" from Element objects (byte-array or long bkeys,
	 * with per-element eflags). Unlike the other variants this one also passes
	 * the overflow action and "unreadable" attributes on create.
	 */
	public static class ByteArraysBTreePipedStore<T> extends
			CollectionPipedStore<T> {

		private static final String COMMAND = "bop insert";
		private List<Element<T>> elements;

		public ByteArraysBTreePipedStore(String key, List<Element<T>> elements,
				boolean createKeyIfNotExists, CollectionAttributes attr,
				Transcoder<T> tc) {
			this.key = key;
			this.elements = elements;
			this.createKeyIfNotExists = createKeyIfNotExists;
			this.attribute = attr;
			this.tc = tc;
			this.itemCount = elements.size();
		}

		public ByteBuffer getAsciiCommand() {
			int capacity = 0;

			// encode values; decodedList is positionally aligned with elements
			List<byte[]> decodedList = new ArrayList<byte[]>(elements.size());
			CachedData cd = null;
			for (Element<T> each : elements) {
				cd = tc.encode(each.getValue());
				decodedList.add(cd.getData());
			}

			// estimate the buffer capacity
			int i = 0;
			for (Element<T> each : elements) {
				capacity += KeyUtil.getKeyBytes(key).length;
				capacity += KeyUtil.getKeyBytes((each.isByteArraysBkey() ? each
						.getBkeyByHex() : String.valueOf(each.getLongBkey()))).length;
				capacity += KeyUtil.getKeyBytes(each.getFlagByHex()).length;
				capacity += decodedList.get(i++).length;
				capacity += 64;
			}

			// allocate the buffer
			ByteBuffer bb = ByteBuffer.allocate(capacity);

			// create ascii operation string
			i = 0;
			Iterator<Element<T>> iterator = elements.iterator();
			while (iterator.hasNext()) {
				Element<T> element = iterator.next();
				byte[] value = decodedList.get(i++);

				setArguments(
						bb,
						COMMAND,
						key,
						(element.isByteArraysBkey() ? element.getBkeyByHex()
								: String.valueOf(element.getLongBkey())),
						element.getFlagByHex(),
						value.length,
						(createKeyIfNotExists) ? "create" : "",
						(createKeyIfNotExists) ? cd.getFlags() : "",
						(createKeyIfNotExists) ? (attribute != null && attribute
								.getExpireTime() != null) ? attribute
								.getExpireTime()
								: CollectionAttributes.DEFAULT_EXPIRETIME : "",
						(createKeyIfNotExists) ? (attribute != null && attribute
								.getMaxCount() != null) ? attribute
								.getMaxCount()
								: CollectionAttributes.DEFAULT_MAXCOUNT : "",
						(createKeyIfNotExists) ? (attribute != null && attribute
								.getOverflowAction() != null) ? attribute
								.getOverflowAction().toString()
								: "" : "",
						(createKeyIfNotExists) ? (attribute != null && attribute
								.getReadable() != null && !attribute.getReadable()) ?
								"unreadable" : "" : "",
						(iterator.hasNext()) ? PIPE : "");
				bb.put(value);
				bb.put(CRLF);
			}

			// flip the buffer
			bb.flip();

			return bb;
		}

		public ByteBuffer getBinaryCommand() {
			throw new RuntimeException("not supported in binary protocol yet.");
		}
	}

	public String getKey() {
		return key;
	}

	public void setKey(String key) {
		this.key = key;
	}

	public boolean iscreateKeyIfNotExists() {
		return createKeyIfNotExists;
	}

	public void setcreateKeyIfNotExists(boolean createKeyIfNotExists) {
		this.createKeyIfNotExists = createKeyIfNotExists;
	}

	public int getItemCount() {
		return this.itemCount;
	}
}
package net.spy.memcached.collection;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import net.spy.memcached.CachedData;
import net.spy.memcached.KeyUtil;
import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands;
import net.spy.memcached.transcoders.Transcoder;

/**
 * Builds pipelined (bulk) ASCII update commands for collection elements.
 * Each element produces one "bop update ... [pipe]\r\n[<data>\r\n]" block;
 * "pipe" is omitted on the final element. An element with a null value is a
 * flag-only update: its length is sent as -1 and no data line follows.
 */
public abstract class CollectionPipedUpdate<T> extends CollectionObject {

	// Marker appended to every command line except the last one of a pipe.
	public static final String PIPE = "pipe";
	// Server-imposed ceiling on the number of piped operations per request.
	public static final int MAX_PIPED_ITEM_COUNT = 500;

	protected String key;
	protected Transcoder<T> tc;
	protected int itemCount;

	/** Renders the complete pipelined ASCII request, flipped and ready to write. */
	public abstract ByteBuffer getAsciiCommand();

	/** Binary protocol rendering (not implemented yet). */
	public abstract ByteBuffer getBinaryCommand();

	/**
	 * Pipelined "bop update" (b+tree element update).
	 */
	public static class BTreePipedUpdate<T> extends CollectionPipedUpdate<T> {

		private static final String COMMAND = "bop update";
		private List<Element<T>> elements;

		public BTreePipedUpdate(String key, List<Element<T>> elements,
				Transcoder<T> tc) {
			this.key = key;
			this.elements = elements;
			this.tc = tc;
			this.itemCount = elements.size();
		}

		public ByteBuffer getAsciiCommand() {
			int capacity = 0;

			// encode values; a null element value (flag-only update) yields a
			// null entry, positionally aligned with elements
			List<byte[]> decodedList = new ArrayList<byte[]>(elements.size());
			CachedData cd = null;
			for (Element<T> each : elements) {
				if (each.getValue() != null) {
					cd = tc.encode(each.getValue());
					decodedList.add(cd.getData());
				} else {
					decodedList.add(null);
				}
			}

			// estimate the buffer capacity (64 bytes of slack per line)
			int i = 0;
			ElementFlagUpdate elementFlagUpdate;
			byte[] elementFlag;
			int flagOffset;
			BitWiseOperands bitOp;
			byte[] value;
			StringBuilder b;

			for (Element<T> each : elements) {
				elementFlagUpdate = each.getElementFlagUpdate();
				if (elementFlagUpdate != null) {
					// eflag
					elementFlag = elementFlagUpdate.getElementFlag();
					if (elementFlag != null) {
						capacity += KeyUtil.getKeyBytes(elementFlagUpdate
								.getElementFlagByHex()).length;

						// "fwhere bitwop" prefix (offset, operator, separators)
						if (elementFlagUpdate.getElementFlagOffset() > -1) {
							capacity += 6;
						}
					}
				}

				capacity += KeyUtil.getKeyBytes(key).length;
				capacity += KeyUtil.getKeyBytes((each.isByteArraysBkey() ? each
						.getBkeyByHex() : String.valueOf(each.getLongBkey()))).length;

				// BUGFIX: the original only advanced the index when the value
				// was non-null, so a flag-only element made every subsequent
				// element re-read the same slot; that could undercount the
				// capacity and later overflow the ByteBuffer.
				value = decodedList.get(i++);
				if (value != null) {
					capacity += value.length;
				}
				capacity += 64;
			}

			// allocate the buffer
			ByteBuffer bb = ByteBuffer.allocate(capacity);

			// create ascii operation string
			i = 0;

			Iterator<Element<T>> iterator = elements.iterator();
			while (iterator.hasNext()) {
				Element<T> element = iterator.next();

				flagOffset = -1;
				bitOp = null;
				elementFlag = null;
				value = decodedList.get(i++);
				elementFlagUpdate = element.getElementFlagUpdate();
				b = new StringBuilder();

				// has element eflag update
				if (elementFlagUpdate != null) {
					elementFlag = elementFlagUpdate.getElementFlag();
					if (elementFlag != null) {
						if (elementFlag.length > 0) {
							// use fwhere bitop
							flagOffset = elementFlagUpdate.getElementFlagOffset();
							bitOp = elementFlagUpdate.getBitOp();
							if (flagOffset > -1 && bitOp != null) {
								b.append(flagOffset).append(" ").append(bitOp)
										.append(" ");
							}

							b.append(elementFlagUpdate.getElementFlagByHex());
						} else {
							// zero-length eflag means "delete the eflag"
							b.append("0");
						}
					}
				}

				setArguments(bb, COMMAND, key,
						(element.isByteArraysBkey() ? element.getBkeyByHex()
								: String.valueOf(element.getLongBkey())),
						b.toString(), (value == null ? -1 : value.length),
						(iterator.hasNext()) ? PIPE : "");
				// -1 length signals "no value update": no data line is sent
				if (value != null) {
					if (value.length > 0) {
						bb.put(value);
					}
					bb.put(CRLF);
				}
			}

			// flip the buffer
			bb.flip();

			return bb;
		}

		public ByteBuffer getBinaryCommand() {
			throw new RuntimeException("not supported in binary protocol yet.");
		}
	}

	public String getKey() {
		return key;
	}

	public void setKey(String key) {
		this.key = key;
	}

	public int getItemCount() {
		return this.itemCount;
	}
}
/**
 * Operation status types for collections.
 *
 * The constant names mirror the status strings the Arcus server sends back;
 * {@link #resolve(String)} maps an arbitrary server string onto a constant,
 * falling back to {@link #UNDEFINED} for anything unrecognized.
 */
public enum CollectionResponse {

	OK,
	END,
	NOT_FOUND,
	NOT_FOUND_ELEMENT,
	ELEMENT_EXISTS,
	CREATED_STORED,
	STORED,
	REPLACED,
	DELETED,
	DELETED_DROPPED,
	TYPE_MISMATCH,
	LENGTH_MISMATCH,
	OVERFLOWED,
	OUT_OF_RANGE,
	ATTR_ERROR_NOT_FOUND,
	ATTR_ERROR_BAD_VALUE,
	EXIST,
	NOT_EXIST,

	UNDEFINED,
	CANCELED,

	INTERRUPT_EXCEPTION,
	EXECUTION_EXCEPTION,
	TIMEOUT_EXCEPTION,
	EXCEPTION,

	UPDATED,
	BKEY_MISMATCH,
	EFLAG_MISMATCH,

	CREATED,
	EXISTS,
	SERVER_ERROR,

	/**
	 * Command pipelining result.
	 */
	RESPONSE,

	/**
	 * Read-only (unreadable) collection.
	 */
	UNREADABLE,

	DUPLICATED,
	TRIMMED,
	DUPLICATED_TRIMMED,
	ATTR_MISMATCH,
	NOTHING_TO_UPDATE;

	// Names of all constants, for a safe resolve() that never throws.
	private static final Set<String> NAMES = new HashSet<String>();

	static {
		for (CollectionResponse response : values()) {
			NAMES.add(response.toString());
		}
	}

	/**
	 * Maps a server status string onto its constant; unknown (or null)
	 * strings resolve to {@link #UNDEFINED}.
	 */
	public static CollectionResponse resolve(String s) {
		return NAMES.contains(s) ? valueOf(s) : UNDEFINED;
	}
}
package net.spy.memcached.collection;

import net.spy.memcached.util.BTreeUtil;

/**
 * Base argument builder for single-element collection store operations.
 * stringify() renders the optional trailing arguments of the ASCII command
 * line ("create <flags> <exptime> <maxcount> [<ovflaction>] [unreadable]"
 * plus an optional request mode) and caches the result in {@link #str}.
 */
public abstract class CollectionStore<T> {

	protected boolean createKeyIfNotExists = false;
	protected int flags = 0;
	protected T value;
	protected RequestMode requestMode;

	protected CollectionAttributes attribute;

	// Element flag (eflag); null = none, zero-length has special meaning in hex form.
	protected byte[] elementFlag;

	// Cache for the stringified argument form.
	// NOTE(review): not synchronized; assumes single-threaded use per operation.
	protected String str;

	public CollectionStore() { }

	/**
	 * @throws IllegalArgumentException if elementFlag is longer than
	 *         ElementFlagFilter.MAX_EFLAG_LENGTH
	 */
	public CollectionStore(T value, byte[] elementFlag, boolean createKeyIfNotExists, RequestMode requestMode, CollectionAttributes attr) {
		// NOTE(review): the check allows length == MAX_EFLAG_LENGTH although the
		// message says "less than"; the inclusive bound appears intended.
		if (elementFlag != null && elementFlag.length > ElementFlagFilter.MAX_EFLAG_LENGTH) {
			throw new IllegalArgumentException("Length of elementFlag must be less than " + ElementFlagFilter.MAX_EFLAG_LENGTH);
		}

		this.value = value;
		this.elementFlag = elementFlag;
		this.createKeyIfNotExists = createKeyIfNotExists;
		this.requestMode = requestMode;
		this.attribute = attr;
	}

	/** Builds (and caches) the trailing-argument string for the store command. */
	public String stringify() {
		if (str != null) return str;

		StringBuilder b = new StringBuilder();

		if (createKeyIfNotExists) {
			b.append("create ").append(flags);
			if (attribute != null) {
				// fall back to the defaults for any attribute left unset
				b.append(" ")
					.append((attribute.getExpireTime() == null) ? CollectionAttributes.DEFAULT_EXPIRETIME
						: attribute.getExpireTime());
				b.append(" ")
					.append((attribute.getMaxCount() == null) ? CollectionAttributes.DEFAULT_MAXCOUNT
						: attribute.getMaxCount());

				if (null != attribute.getOverflowAction()) {
					b.append(" ").append(attribute.getOverflowAction());
				}

				if (null != attribute.getReadable() && !attribute.getReadable()) {
					b.append(" ").append("unreadable");
				}
			}
			else {
				b.append(" ").append(CollectionAttributes.DEFAULT_EXPIRETIME);
				b.append(" ").append(CollectionAttributes.DEFAULT_MAXCOUNT);
			}
		}

		// an optional request mode like noreply, pipe and getrim
		if (requestMode != null) {
			b.append((b.length() <= 0)? "" : " ").append(requestMode.getAscii());
		}

		str = b.toString();
		return str;
	}

	public byte[] getElementFlag() {
		return elementFlag;
	}

	/**
	 * Hex form of the eflag: "" for none, "0" for a zero-length flag
	 * (the protocol's "delete eflag" marker), otherwise 0x-prefixed hex.
	 */
	public String getElementFlagByHex() {
		if (elementFlag == null) {
			return "";
		}

		if (elementFlag.length == 0) {
			return "0";
		}

		return BTreeUtil.toHex(elementFlag);
	}

	public boolean iscreateKeyIfNotExists() {
		return createKeyIfNotExists;
	}

	public void setcreateKeyIfNotExists(boolean createKeyIfNotExists) {
		this.createKeyIfNotExists = createKeyIfNotExists;
	}

	public int getFlags() {
		return flags;
	}

	public void setFlags(int flags) {
		this.flags = flags;
	}

	public T getValue() {
		return value;
	}

	public void setValue(T value) {
		this.value = value;
	}

	public RequestMode getRequestMode() {
		return requestMode;
	}

	public void setRequestMode(RequestMode requestMode) {
		this.requestMode = requestMode;
	}

	// NOTE(review): setters do not invalidate the cached str; stringify()
	// after a mutation returns the stale form — presumably builders are
	// fully configured before first stringify(); confirm against callers.
	public void setElementFlag(byte[] elementFlag) {
		this.elementFlag = elementFlag;
	}

	public void setCollectionAttributes(CollectionAttributes attributes) {
		this.attribute = attributes;
	}

	public String toString() {
		return (str != null)? str : stringify();
	}

	/** Returns the ASCII-protocol command name (e.g. "bop insert"). */
	public abstract String getCommand();

}
/**
 * Supported collection types, each paired with the type string the Arcus
 * server uses for it.
 */
public enum CollectionType {

	/**
	 * Key-value
	 */
	kv("kv"),
	/**
	 * List collection
	 */
	list("list"),
	/**
	 * Set collection
	 */
	set("set"),
	/**
	 * B+ tree collection
	 */
	btree("b+")
	;

	String stringValue;

	CollectionType(String value) {
		this.stringValue = value;
	}

	/** Returns the server-side type string (e.g. "b+" for btree). */
	public String getStringValue() {
		return this.stringValue;
	}

	/**
	 * Looks up the type matching a server-side type string; null when no
	 * type matches.
	 */
	public static CollectionType find(String value) {
		for (CollectionType type : values()) {
			if (type.stringValue.equals(value)) {
				return type;
			}
		}
		return null;
	}

}
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.util.BTreeUtil; + +public abstract class CollectionUpdate { + + protected boolean createKeyIfNotExists = false; + protected int flags = 0; + protected T newValue; + protected boolean noreply = false; + + protected int flagOffset = -1; + protected BitWiseOperands bitOp; + protected byte[] elementFlag; + + protected String str; + + public CollectionUpdate() { + } + + public CollectionUpdate(T newValue, ElementFlagUpdate elementFlagUpdate, boolean noreply) { + if (elementFlagUpdate == null) { + this.newValue = newValue; + this.flagOffset = -1; + this.bitOp = null; + this.elementFlag = null; + } else { + if (newValue == null && elementFlagUpdate.getElementFlag() == null) { + throw new IllegalArgumentException( + "One of the newValue or elementFlag must not be null."); + } + + if (elementFlagUpdate.getElementFlag().length > ElementFlagFilter.MAX_EFLAG_LENGTH) { + throw new IllegalArgumentException( + "length of element flag cannot exceed " + + ElementFlagFilter.MAX_EFLAG_LENGTH); + } + + this.newValue = newValue; + this.flagOffset = elementFlagUpdate.getElementFlagOffset(); + this.bitOp = elementFlagUpdate.getBitOp(); + this.elementFlag = elementFlagUpdate.getElementFlag(); + } + + this.noreply = noreply; + } + + public String stringify() { + if (str != null) + return str; + + StringBuilder b = new StringBuilder(); + + if (flagOffset > -1 && bitOp != null && elementFlag != null) { + b.append(flagOffset).append(" ").append(bitOp).append(" "); + } + + if (elementFlag != null) { + b.append(getElementFlagByHex()); + } + + if (noreply) { + b.append((b.length() <= 0) ? 
"" : " ").append("noreply"); + } + + str = b.toString(); + return str; + } + + public String getElementFlagByHex() { + if (elementFlag == null) { + return ""; + } + + if (elementFlag.length == 0) { + return "0"; + } + + return BTreeUtil.toHex(elementFlag); + } + + public boolean iscreateKeyIfNotExists() { + return createKeyIfNotExists; + } + + public void setcreateKeyIfNotExists(boolean createKeyIfNotExists) { + this.createKeyIfNotExists = createKeyIfNotExists; + } + + public int getFlags() { + return flags; + } + + public void setFlags(int flags) { + this.flags = flags; + } + + public T getNewValue() { + return newValue; + } + + public void setNewValue(T newValue) { + this.newValue = newValue; + } + + public boolean isNoreply() { + return noreply; + } + + public void setNoreply(boolean noreply) { + this.noreply = noreply; + } + + public void setElementFlag(byte[] elementFlag) { + this.elementFlag = elementFlag; + } + + public String toString() { + return (str != null) ? str : stringify(); + } + + public abstract String getCommand(); + +} diff --git a/src/main/java/net/spy/memcached/collection/Element.java b/src/main/java/net/spy/memcached/collection/Element.java new file mode 100644 index 000000000..dcfa38531 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/Element.java @@ -0,0 +1,160 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +/** + * Collection element + * + * @param + */ +public class Element { + private final byte[] bkey; + private final Long longBkey; + private final T value; + private final byte[] eflag; + private final ElementFlagUpdate elementFlagUpdate; + + private final boolean isByteArraysBkey; + + /** + * Create an element + * + * @param bkey key of element + * @param value value of element + * @param eflag flag of element (minimun length is 1. maximum length is 31) + */ + public Element(byte[] bkey, T value, byte[] eflag) { + this.bkey = bkey; + this.longBkey = null; + this.value = value; + this.eflag = eflag; + this.isByteArraysBkey = true; + this.elementFlagUpdate = null; + } + + public Element(long bkey, T value, byte[] eflag) { + this.bkey = null; + this.longBkey = bkey; + this.value = value; + this.eflag = eflag; + this.isByteArraysBkey = false; + this.elementFlagUpdate = null; + } + + public Element(byte[] bkey, T value, ElementFlagUpdate elementFlagUpdate) { + this.bkey = bkey; + this.longBkey = null; + this.value = value; + this.eflag = null; + this.isByteArraysBkey = true; + this.elementFlagUpdate = elementFlagUpdate; + } + + public Element(long bkey, T value, ElementFlagUpdate elementFlagUpdate) { + this.bkey = null; + this.longBkey = bkey; + this.value = value; + this.eflag = null; + this.isByteArraysBkey = false; + this.elementFlagUpdate = elementFlagUpdate; + } + + /** + * get value of element flag by hex. + * + * @return element flag by hex (e.g. 0x01) + */ + public String getFlagByHex() { + // convert to hex based on its real byte array + return BTreeUtil.toHex(eflag); + } + + /** + * get bkey + * + * @return bkey by hex (e.g. 
0x01) + */ + public String getBkeyByHex() { + return BTreeUtil.toHex(bkey); + } + + /** + * get bkey + * + * @return bkey by byte[] + */ + public byte[] getByteArrayBkey() { + return bkey; + } + + /** + * get bkey + * + * @return bkey (-1 if not available) + */ + public long getLongBkey() { + return (longBkey == null)? -1 : longBkey; + } + + /** + * get value + * + * @return value + */ + public T getValue() { + return value; + } + + /** + * get flag + * + * @return element flag + */ + public byte[] getFlag() { + return eflag; + } + + public boolean isByteArraysBkey() { + return isByteArraysBkey; + } + + public ElementFlagUpdate getElementFlagUpdate() { + return elementFlagUpdate; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{ \""); + if (isByteArraysBkey) { + sb.append(getBkeyByHex()); + } else { + sb.append(getLongBkey()); + } + sb.append("\" : { "); + + sb.append(" \"eflag\" : \"").append(BTreeUtil.toHex(eflag)).append("\""); + sb.append(","); + sb.append(" \"value\" : \"").append(value.toString()).append("\""); + sb.append(" }"); + + return sb.toString(); + } + +} diff --git a/src/main/java/net/spy/memcached/collection/ElementFlagFilter.java b/src/main/java/net/spy/memcached/collection/ElementFlagFilter.java new file mode 100644 index 000000000..3ca8fa703 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ElementFlagFilter.java @@ -0,0 +1,181 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +/** + * element flag filter + */ +public class ElementFlagFilter { + + /** + * Do not filter. + */ + public static final ElementFlagFilter DO_NOT_FILTER = null; + + /** + * Empty element flag. + */ + public static final byte[] EMPTY_ELEMENT_FLAG = null; + + /** + * Max element flag length. + */ + public static final int MAX_EFLAG_LENGTH = 31; + + // compare offset + protected int fwhere = 0; + + // bitwise comparison (optional) + protected BitWiseOperands bitOp = null; + protected byte[] bitCompValue = null; + + // comparison + protected CompOperands compOp; + protected byte[] compValue; + + public ElementFlagFilter() { + } + + /** + * create element flag filter + * + * @param compOperand + * comparison operand + * @param compValue + * comparison value + */ + public ElementFlagFilter(CompOperands compOperand, byte[] compValue) { + if (compOperand == null || compValue == null) { + throw new NullPointerException("Invalid compOperand and compValue."); + } + + if (compValue.length == 0) { + throw new IllegalArgumentException( + "Length of comparison value must be larger than 0."); + } + + if (compValue.length > MAX_EFLAG_LENGTH) { + throw new IllegalArgumentException( + "Length of comparison value must be less than " + MAX_EFLAG_LENGTH); + } + + this.compOp = compOperand; + this.compValue = compValue; + } + + /** + * set bitwise compare + * + * @param bitOp + * bitwise operand + * @param bitCompValue + * bitwise comparison value + * @return element flag filter + */ + public ElementFlagFilter setBitOperand(BitWiseOperands bitOp, + byte[] bitCompValue) { + if (bitOp == null || bitCompValue == null) { + throw new NullPointerException("Invalid compOperand and compValue."); + } + + if (bitCompValue.length == 0) { + throw new IllegalArgumentException( + "Length of bit comparison 
value must be larger than 0."); + } + + if (bitCompValue.length > MAX_EFLAG_LENGTH) { + throw new IllegalArgumentException( + "Length of bit comparison value must be less than " + MAX_EFLAG_LENGTH); + } + + this.bitOp = bitOp; + this.bitCompValue = bitCompValue; + return this; + } + + /** + * set bitwise compare offset + * + * @param offset + * 0-base offset. this value must less than length of exists + * element flag. + * @return element flag filter + */ + public ElementFlagFilter setCompareOffset(int offset) { + this.fwhere = offset; + return this; + } + + protected boolean isBitWiseOpEnabled() { + return bitOp != null && bitCompValue != null; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append(fwhere).append(" "); + + if (isBitWiseOpEnabled()) { + sb.append(bitOp).append(" "); + sb.append(BTreeUtil.toHex(bitCompValue)).append(" "); + } + + sb.append(compOp).append(" "); + sb.append(BTreeUtil.toHex(compValue)); + + return sb.toString(); + } + + /** + * Comparison Operands + */ + public enum CompOperands { + Equal("EQ"), NotEqual("NE"), LessThan("LT"), LessOrEqual("LE"), GreaterThan( + "GT"), GreaterOrEqual("GE"); + + private String op; + + CompOperands(String operand) { + op = operand; + } + + public String toString() { + return op; + } + } + + /** + * Bitwise comparison operands + * + */ + public enum BitWiseOperands { + AND("&"), OR("|"), XOR("^"); + + private String op; + + BitWiseOperands(String operand) { + op = operand; + } + + public String toString() { + return op; + } + } +} diff --git a/src/main/java/net/spy/memcached/collection/ElementFlagUpdate.java b/src/main/java/net/spy/memcached/collection/ElementFlagUpdate.java new file mode 100644 index 000000000..20066b8ba --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ElementFlagUpdate.java @@ -0,0 +1,111 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.util.BTreeUtil; + +/** + * Element flag + */ +public class ElementFlagUpdate { + + public static final ElementFlagUpdate RESET_FLAG = new ElementFlagUpdate(); + + private final int elementFlagOffset; + private final BitWiseOperands bitOp; + private final byte[] elementFlag; + + private ElementFlagUpdate() { + this.elementFlag = new byte[] {}; + this.elementFlagOffset = -1; + this.bitOp = null; + } + + /** + * create element flag update + * + * @param elementFlag + * new element flag + */ + public ElementFlagUpdate(byte[] elementFlag) { + if (elementFlag == null) { + throw new IllegalArgumentException("element flag may not null."); + } + if (elementFlag.length < 1 || elementFlag.length > 31) { + throw new IllegalArgumentException( + "length of element flag must be between 1 and 31"); + } + this.elementFlag = elementFlag; + this.elementFlagOffset = -1; + this.bitOp = null; + } + + /** + * create element flag update + * + * @param elementFlagOffset + * bitwise update offset + * @param bitOp + * bitwise operand + * @param elementFlag + * element flag to bitwise operation + */ + public ElementFlagUpdate(int elementFlagOffset, BitWiseOperands bitOp, + byte[] elementFlag) { + if (elementFlagOffset < 0) { + throw new IllegalArgumentException( + "elementFlagOffset must be 
larger than 0."); + } + if (bitOp == null) { + throw new IllegalArgumentException("bitOp may not null."); + } + if (elementFlag == null) { + throw new IllegalArgumentException("element flag may not null."); + } + if (elementFlag.length < 1 || elementFlag.length > 31) { + throw new IllegalArgumentException( + "length of element flag must be between 1 and 31"); + } + this.elementFlagOffset = elementFlagOffset; + this.bitOp = bitOp; + this.elementFlag = elementFlag; + } + + public int getElementFlagOffset() { + return elementFlagOffset; + } + + public BitWiseOperands getBitOp() { + return bitOp; + } + + public byte[] getElementFlag() { + return elementFlag; + } + + /** + * get value of element flag by hex. + * + * @return element flag by hex (e.g. 0x01) + */ + public String getElementFlagByHex() { + // convert to hex based on its real byte array + return BTreeUtil.toHex(elementFlag); + } + +} diff --git a/src/main/java/net/spy/memcached/collection/ElementMultiFlagsFilter.java b/src/main/java/net/spy/memcached/collection/ElementMultiFlagsFilter.java new file mode 100644 index 000000000..1e3df4ac3 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ElementMultiFlagsFilter.java @@ -0,0 +1,104 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.util.ArrayList; + +import net.spy.memcached.util.BTreeUtil; + +public class ElementMultiFlagsFilter extends ElementFlagFilter { + + final static int MAX_EFLAGS = 100; + private ArrayList compValue = new ArrayList(); + + public ElementMultiFlagsFilter() { + } + + public ElementMultiFlagsFilter setCompOperand(CompOperands compOperand) { + if (compOperand == null) { + throw new NullPointerException("Invalid compOperand"); + } + + if (compOperand != CompOperands.Equal + && compOperand != CompOperands.NotEqual) { + throw new IllegalArgumentException( + "The only compOperand Equal and NotEqual can compare multi compValues."); + } + + this.compOp = compOperand; + + return this; + } + + public ElementMultiFlagsFilter addCompValue(byte[] compValue) { + + if (compValue == null) { + throw new NullPointerException("Invalid compOperand and compValue."); + } + + if (compValue.length == 0) { + throw new IllegalArgumentException( + "Length of comparison value must be larger than 0."); + } + + if (compValue.length > MAX_EFLAG_LENGTH) { + throw new IllegalArgumentException( + "Length of comparison value must be less than " + + MAX_EFLAG_LENGTH); + } + + if (this.compValue.size() > MAX_EFLAGS) { + throw new IllegalArgumentException( + "Count of comparison values must be less than " + + MAX_EFLAGS); + } + + if (this.compValue.size() > 0 + && this.compValue.get(0).length != compValue.length) { + throw new IllegalArgumentException( + "Length of comparison value must be same with " + + this.compValue.get(0).length); + } + + this.compValue.add(compValue); + + return this; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + + sb.append(fwhere).append(" "); + + if (isBitWiseOpEnabled()) { + sb.append(bitOp).append(" "); + sb.append(BTreeUtil.toHex(bitCompValue)).append(" "); + } + + sb.append(compOp).append(" "); + + for (int i = 0; i < compValue.size(); i++) { + if (i > 0) { + sb.append(","); 
+ } + sb.append(BTreeUtil.toHex(compValue.get(i))); + } + + return sb.toString(); + } +} diff --git a/src/main/java/net/spy/memcached/collection/ElementValueType.java b/src/main/java/net/spy/memcached/collection/ElementValueType.java new file mode 100644 index 000000000..270bbc573 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ElementValueType.java @@ -0,0 +1,71 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/**
 * Marker types describing the java type of a collection element value.
 * Each constant is a distinct singleton instance, so the constants can
 * be told apart by identity.
 */
public abstract class ElementValueType {

    public static final ElementValueType STRING = new StringType();
    public static final ElementValueType LONG = new LongType();
    public static final ElementValueType INTEGER = new IntegerType();
    public static final ElementValueType BOOLEAN = new BooleanType();
    public static final ElementValueType DATE = new DateType();
    public static final ElementValueType BYTE = new ByteType();
    public static final ElementValueType FLOAT = new FloatType();
    public static final ElementValueType DOUBLE = new DoubleType();
    public static final ElementValueType BYTEARRAY = new ByteArrayType();
    public static final ElementValueType OTHERS = new OtherObjectType();

    // One private marker subclass per supported value type.  The classes
    // carry no state or behavior; only their distinct identities matter.
    private static final class StringType extends ElementValueType { }

    private static final class LongType extends ElementValueType { }

    private static final class IntegerType extends ElementValueType { }

    private static final class BooleanType extends ElementValueType { }

    private static final class DateType extends ElementValueType { }

    private static final class ByteType extends ElementValueType { }

    private static final class FloatType extends ElementValueType { }

    private static final class DoubleType extends ElementValueType { }

    private static final class ByteArrayType extends ElementValueType { }

    private static final class OtherObjectType extends ElementValueType { }
}
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection;

import java.util.Map;

import net.spy.memcached.util.BTreeUtil;

/**
 * "bop get" operation over a byte-array bkey range with an optional
 * element flag filter.  Also parses the per-item response headers,
 * which may or may not carry an element flag (eflag).
 */
public class ExtendedBTreeGet extends CollectionGet {

	private static final String command = "bop get";

	// bkey range rendered as "0x<from>..0x<to>"
	protected String range;
	// number of elements to skip; values <= 0 are omitted from the command
	protected int offset = -1;
	// maximum number of elements to fetch; values <= 0 are omitted
	protected int count = -1;
	// result map; never assigned in this class — presumably populated by the
	// response decoder (TODO confirm against the operation implementation)
	protected Map map;

	// bkey of the item currently being parsed from the response
	protected byte[] subkey;

	protected ElementFlagFilter elementFlagFilter;

	/**
	 * @param from             range start bkey (byte-array form)
	 * @param to               range end bkey (byte-array form)
	 * @param offset           number of elements to skip (<= 0 to omit)
	 * @param count            maximum number of elements (<= 0 to omit)
	 * @param delete           delete the fetched elements
	 * @param dropIfEmpty      drop the b+tree if deletion empties it
	 * @param elementFlagFilter optional eflag filter (null to omit)
	 */
	public ExtendedBTreeGet(byte[] from, byte[] to, int offset,
			int count, boolean delete, boolean dropIfEmpty,
			ElementFlagFilter elementFlagFilter) {
		this.headerCount = 2;
		this.range = BTreeUtil.toHex(from) + ".." + BTreeUtil.toHex(to);
		this.offset = offset;
		this.count = count;
		this.delete = delete;
		this.dropIfEmpty = dropIfEmpty;
		this.elementFlagFilter = elementFlagFilter;
	}

	public ElementFlagFilter getElementFlagFilter() {
		return elementFlagFilter;
	}

	public String getRange() {
		return range;
	}

	public void setRange(String range) {
		this.range = range;
	}

	public int getCount() {
		return count;
	}

	public void setCount(int count) {
		this.count = count;
	}

	public Map getMap() {
		return map;
	}

	public byte[] getLongSubkey() {
		return subkey;
	}

	/**
	 * Render the command arguments:
	 * "&lt;range&gt; [&lt;filter&gt;] [&lt;offset&gt;] [&lt;count&gt;] [drop|delete]".
	 * The result is cached in str after the first call.
	 */
	public String stringify() {
		if (str != null) return str;

		StringBuilder b = new StringBuilder();
		b.append(range);

		if (elementFlagFilter != null) b.append(" ").append(elementFlagFilter.toString());
		if (offset > 0) b.append(" ").append(offset);
		if (count > 0) b.append(" ").append(count);
		if (delete && dropIfEmpty) b.append(" drop");
		if (delete && !dropIfEmpty) b.append(" delete");

		str = b.toString();
		return str;
	}

	public String getCommand() {
		return command;
	}

	public void resetHeaderCount(int count) {
		this.headerCount = count;
	}

	// 1: expecting "<bkey> <eflag-or-length>"; 2: expecting the "<length>"
	// line that follows a header carrying an eflag
	private int headerParseStep = 1;

	private boolean elementFlagExists = false;

	// A record is complete after one header line when there is no eflag,
	// or after the second line (step back at 1) when an eflag was present.
	public boolean eachRecordParseCompleted() {
		if (elementFlagExists) {
			return headerParseStep == 1;
		} else {
			return true;
		}
	}

	@Override
	public boolean headerReady(int spaceCount) {
		// 2 tokens without an eflag, 3 tokens with one
		return spaceCount == 2 || spaceCount == 3;
	}

	/**
	 * Parse one item header line.  Two shapes occur:
	 * "0x&lt;bkey&gt; 0x&lt;eflag&gt;" (eflag present; the data length arrives
	 * on the following line) or "0x&lt;bkey&gt; &lt;length&gt;" (no eflag).
	 */
	public void decodeItemHeader(String itemHeader) {
		String[] splited = itemHeader.split(" ");

		if (headerParseStep == 1) {
			// found element flag.
			if (splited[1].startsWith("0x")) {
				this.elementFlagExists = true;
				// strip the "0x" prefixes before decoding the hex strings
				this.subkey = BTreeUtil.hexStringToByteArrays(splited[0].substring(2));
				this.elementFlag = BTreeUtil.hexStringToByteArrays(splited[1].substring(2));
//				this.headerCount++;
				headerParseStep = 2;
			} else {
				this.subkey = BTreeUtil.hexStringToByteArrays(splited[0].substring(2));
				this.dataLength = Integer.parseInt(splited[1]);
			}
		} else {
			// second line of an eflag-carrying record: just the data length
			this.headerParseStep = 1;
			this.dataLength = Integer.parseInt(splited[1]);
		}
	}
}
+ */ +package net.spy.memcached.collection; + +public class ListCreate extends CollectionCreate { + + private static final String command = "lop create"; + + public ListCreate() { + super(); + } + + public ListCreate(int flags, Integer expTime, Long maxCount, CollectionOverflowAction overflowAction, Boolean readable, boolean noreply) { + super(flags, expTime, maxCount, overflowAction, readable, noreply); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/ListDelete.java b/src/main/java/net/spy/memcached/collection/ListDelete.java new file mode 100644 index 000000000..798dbc09f --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ListDelete.java @@ -0,0 +1,65 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +public class ListDelete extends CollectionDelete { + + private static final String command = "lop delete"; + + public ListDelete(int index, boolean noreply) { + this.range = String.valueOf(index); + this.noreply = noreply; + } + + public ListDelete(int index, boolean noreply, boolean dropIfEmpty) { + this(index, noreply); + this.dropIfEmpty = dropIfEmpty; + } + + public ListDelete(int from, int to, boolean noreply) { + this.range = String.valueOf(from) + ".." 
+ String.valueOf(to); + this.noreply = noreply; + } + + public ListDelete(int from, int to, boolean noreply, boolean dropIfEmpty) { + this(from, to, noreply); + this.dropIfEmpty = dropIfEmpty; + } + + public String stringify() { + if (str != null) return str; + + StringBuilder b = new StringBuilder(); + b.append(range); + + if (dropIfEmpty) { + b.append(" drop"); + } + + if (noreply) { + b.append(" noreply"); + } + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/ListGet.java b/src/main/java/net/spy/memcached/collection/ListGet.java new file mode 100644 index 000000000..34f8f089b --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ListGet.java @@ -0,0 +1,84 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.util.List; + +public class ListGet extends CollectionGet { + + public static final int FIRST = 0; + public static final int LAST = -1; + + private static final String command = "lop get"; + + protected String range; + protected List list; + + public ListGet(int index, boolean delete) { + this.headerCount = 1; + this.range = String.valueOf(index); + this.delete = delete; + } + + public ListGet(int index, boolean delete, boolean dropIfEmpty) { + this(index, delete); + this.dropIfEmpty = dropIfEmpty; + } + + public ListGet(int from, int to, boolean delete) { + this.headerCount = 1; + this.range = String.valueOf(from) + ".." + String.valueOf(to); + this.delete = delete; + } + + public ListGet(int from, int to, boolean delete, boolean dropIfEmpty) { + this(from, to, delete); + this.dropIfEmpty = dropIfEmpty; + } + + public String getRange() { + return range; + } + + public void setRange(String range) { + this.range = range; + } + + public List getList() { + return list; + } + + public String stringify() { + if (str != null) return str; + + StringBuilder b = new StringBuilder(); + b.append(range); + if (delete && dropIfEmpty) b.append(" drop"); + if (delete && !dropIfEmpty) b.append(" delete"); + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + + public void decodeItemHeader(String itemHeader) { + this.dataLength = Integer.parseInt(itemHeader); + } +} diff --git a/src/main/java/net/spy/memcached/collection/ListStore.java b/src/main/java/net/spy/memcached/collection/ListStore.java new file mode 100644 index 000000000..bc47cecba --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/ListStore.java @@ -0,0 +1,35 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +public class ListStore extends CollectionStore { + + private static final String command = "lop insert"; + + public ListStore() { + super(); + } + + public ListStore(T value, boolean createKeyIfNotExists, RequestMode requestMode, CollectionAttributes attr) { + super(value, null, createKeyIfNotExists, requestMode, attr); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/RequestMode.java b/src/main/java/net/spy/memcached/collection/RequestMode.java new file mode 100644 index 000000000..7428db504 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/RequestMode.java @@ -0,0 +1,36 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +/** + * Optional request modes. 
+ */ +public enum RequestMode { + + NO_REPLY("noreply"), PIPE("pipe"), GET_TRIM("getrim"); + + private final String ascii; + + RequestMode(String ascii) { + this.ascii = ascii; + } + + public String getAscii() { + return ascii; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/SMGetElement.java b/src/main/java/net/spy/memcached/collection/SMGetElement.java new file mode 100644 index 000000000..8f56b3a72 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SMGetElement.java @@ -0,0 +1,118 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.util.BTreeUtil; + +public class SMGetElement implements Comparable> { + + private String key; + private long bkey; + private byte[] bytebkey; + private T value; + + public SMGetElement(String key, long bkey, T value) { + this.key = key; + this.bkey = bkey; + this.bytebkey = null; + this.value = value; + } + + public SMGetElement(String key, byte[] bkey, T value) { + this.key = key; + this.bkey = -1; + this.bytebkey = bkey; + this.value = value; + } + + @Override + public String toString() { + return "SMGetElement {KEY:" + key + ", BKEY:" + + ((bytebkey == null) ? bkey : BTreeUtil.toHex(bytebkey)) + + ", VALUE:" + value + "}"; + } + + @Override + public int compareTo(SMGetElement param) { + // null is later than param. 
+ if (key == null) { + return 1; + } + + if (param == null || param.getKey() == null) { + return 0; + } + + int compareKey = key.compareTo(param.getKey()); + + int compareBKey = ((bytebkey == null) ? (int) (param.getBkey() - bkey) + : BTreeUtil.compareByteArraysInLexOrder(param.getByteBkey(), bytebkey)); + + if (compareBKey != 0) { + return compareBKey; + } else { + return compareKey; + } + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } + + public long getBkey() { + if (bkey == -1) { + throw new IllegalStateException("This element has byte[] bkey. " + toString()); + } + return bkey; + } + + public byte[] getByteBkey() { + if (bytebkey == null) { + throw new IllegalStateException( + "This element has java.lang.Long type bkey. " + toString()); + } + return bytebkey; + } + + public Object getBkeyByObject() { + if (bytebkey != null) { + return bytebkey; + } else { + return bkey; + } + } + + public void setBkey(long bkey) { + this.bkey = bkey; + } + + public T getValue() { + return value; + } + + public void setValue(T value) { + this.value = value; + } + + public boolean hasByteArrayBkey() { + return bytebkey != null; + } +} diff --git a/src/main/java/net/spy/memcached/collection/SetCreate.java b/src/main/java/net/spy/memcached/collection/SetCreate.java new file mode 100644 index 000000000..ae9c0e74f --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetCreate.java @@ -0,0 +1,35 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +public class SetCreate extends CollectionCreate { + + private static final String command = "sop create"; + + public SetCreate() { + super(); + } + + public SetCreate(int flags, Integer expTime, Long maxCount, Boolean readable, boolean noreply) { + super(flags, expTime, maxCount, null, readable, noreply); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/SetDelete.java b/src/main/java/net/spy/memcached/collection/SetDelete.java new file mode 100644 index 000000000..c79ed4cb8 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetDelete.java @@ -0,0 +1,72 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class SetDelete extends CollectionDelete { + + private static final String command = "sop delete"; + + protected T value; + protected byte[] data; + + public SetDelete(T value, boolean noreply) { + this.value = value; + this.noreply = noreply; + } + + public SetDelete(T value, boolean noreply, boolean dropIfEmpty) { + this(value, noreply); + this.dropIfEmpty = dropIfEmpty; + } + + public T getValue() { + return value; + } + + public void setValue(T value) { + this.value = value; + } + + public byte[] getData() { + return data; + } + + public void setData(byte[] data) { + this.data = data; + } + + public String stringify() { + StringBuilder b = new StringBuilder(); + b.append(data.length); + + if (dropIfEmpty) { + b.append(" drop"); + } + + if (noreply) { + b.append(" noreply"); + } + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/SetExist.java b/src/main/java/net/spy/memcached/collection/SetExist.java new file mode 100644 index 000000000..3707deaa4 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetExist.java @@ -0,0 +1,33 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class SetExist extends CollectionExist { + + private static final String command = "sop exist"; + + public SetExist() { } + + public SetExist(T value, byte[] data) { + super(value, data); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/collection/SetGet.java b/src/main/java/net/spy/memcached/collection/SetGet.java new file mode 100644 index 000000000..fad3b897a --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetGet.java @@ -0,0 +1,70 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.util.Set; + +public class SetGet extends CollectionGet { + + private static final String command = "sop get"; + + protected int count; + protected Set set; + + public SetGet(int count, boolean delete) { + this.headerCount = 1; + this.count = count; + this.delete = delete; + } + + public SetGet(int count, boolean delete, boolean dropIfEmpty) { + this(count, delete); + this.dropIfEmpty = dropIfEmpty; + } + + public int getCount() { + return count; + } + + public void setCount(int count) { + this.count = count; + } + + public void setSet(Set set) { + this.set = set; + } + + public String stringify() { + if (str != null) return str; + + StringBuilder b = new StringBuilder(); + b.append(count); + if (delete && dropIfEmpty) b.append(" drop"); + if (delete && !dropIfEmpty) b.append(" delete"); + + str = b.toString(); + return str; + } + + public String getCommand() { + return command; + } + + public void decodeItemHeader(String itemHeader) { + this.dataLength = Integer.parseInt(itemHeader); + } +} diff --git a/src/main/java/net/spy/memcached/collection/SetPipedExist.java b/src/main/java/net/spy/memcached/collection/SetPipedExist.java new file mode 100644 index 000000000..0d1c4278b --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetPipedExist.java @@ -0,0 +1,96 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +import net.spy.memcached.CachedData; +import net.spy.memcached.KeyUtil; +import net.spy.memcached.transcoders.Transcoder; + +public class SetPipedExist extends CollectionObject { + + public static final int MAX_PIPED_ITEM_COUNT = 500; + + private static final String COMMAND = "sop exist"; + private static final String PIPE = "pipe"; + + private final String key; + private final List values; + private final Transcoder tc; + private int itemCount; + + public List getValues() { + return this.values; + } + + public int getItemCount() { + return this.itemCount; + } + + public SetPipedExist(String key, List values, Transcoder tc) { + this.key = key; + this.values = values; + this.tc = tc; + this.itemCount = values.size(); + } + + public ByteBuffer getAsciiCommand() { + int capacity = 0; + + // decode values + Collection encodedList = new ArrayList(values.size()); + CachedData cd = null; + for (T each : values) { + cd = tc.encode(each); + encodedList.add(cd.getData()); + } + + // estimate the buffer capacity + for (byte[] each : encodedList) { + capacity += KeyUtil.getKeyBytes(key).length; + capacity += each.length; + capacity += 64; + } + + // allocate the buffer + ByteBuffer bb = ByteBuffer.allocate(capacity); + + // create ascii operation string + Iterator iterator = encodedList.iterator(); + while (iterator.hasNext()) { + byte[] each = iterator.next(); + + setArguments(bb, COMMAND, key, each.length, + (iterator.hasNext()) ? 
PIPE : ""); + bb.put(each); + bb.put(CRLF); + } + // flip the buffer + bb.flip(); + + return bb; + } + + public ByteBuffer getBinaryCommand() { + throw new RuntimeException("not supported in binary protocol yet."); + } +} diff --git a/src/main/java/net/spy/memcached/collection/SetStore.java b/src/main/java/net/spy/memcached/collection/SetStore.java new file mode 100644 index 000000000..8d2c2a1f8 --- /dev/null +++ b/src/main/java/net/spy/memcached/collection/SetStore.java @@ -0,0 +1,33 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection; + +public class SetStore extends CollectionStore { + + private static final String command = "sop insert"; + + public SetStore() { } + + public SetStore(T value, boolean createKeyIfNotExists, RequestMode requestMode, CollectionAttributes attr) { + super(value, null, createKeyIfNotExists, requestMode, attr); + } + + public String getCommand() { + return command; + } + +} diff --git a/src/main/java/net/spy/memcached/compat/CloseUtil.java b/src/main/java/net/spy/memcached/compat/CloseUtil.java new file mode 100644 index 000000000..a214d4e5e --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/CloseUtil.java @@ -0,0 +1,34 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.compat; + +import net.spy.memcached.compat.log.Logger; +import net.spy.memcached.compat.log.LoggerFactory; + + +/** + * Superclass for all Spy Objects. + */ +public class SpyObject extends Object { + + private transient Logger logger=null; + + /** + * Get an instance of SpyObject. + */ + public SpyObject() { + super(); + } + + /** + * Get a Logger instance for this class. + * + * @return an appropriate logger instance. + */ + protected Logger getLogger() { + if(logger==null) { + logger=LoggerFactory.getLogger(getClass()); + } + return(logger); + } + +} diff --git a/src/main/java/net/spy/memcached/compat/SpyThread.java b/src/main/java/net/spy/memcached/compat/SpyThread.java new file mode 100644 index 000000000..dfc17e2fa --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/SpyThread.java @@ -0,0 +1,45 @@ +// Copyright (c) 2002 Dustin Sallings + +package net.spy.memcached.compat; + +import net.spy.memcached.compat.log.Logger; +import net.spy.memcached.compat.log.LoggerFactory; + +/** + * Superclass for all Spy Threads. + */ +public class SpyThread extends Thread { + + private transient Logger logger=null; + + // Thread has *eight* constructors. Damnit. + + /** + * Get an instance of SpyThread. 
+ */ + public SpyThread() { + super(); + } + + /** + * Get an instance of SpyThread with a name. + * + * @param name thread name + */ + public SpyThread(String name) { + super(name); + } + + /** + * Get a Logger instance for this class. + * + * @return an appropriate logger instance. + */ + protected Logger getLogger() { + if(logger==null) { + logger=LoggerFactory.getLogger(getClass()); + } + return(logger); + } + +} diff --git a/src/main/java/net/spy/memcached/compat/SyncThread.java b/src/main/java/net/spy/memcached/compat/SyncThread.java new file mode 100644 index 000000000..c7763553f --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/SyncThread.java @@ -0,0 +1,112 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.compat; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.IdentityHashMap; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; + +/** + * Thread that invokes a callable multiple times concurrently. + */ +public class SyncThread extends SpyThread { + + private final Callable callable; + private final CyclicBarrier barrier; + private final CountDownLatch latch; + private Throwable throwable=null; + private T rv=null; + + /** + * Get a SyncThread that will call the given callable when the given + * barrier allows it past. + * + * @param b the barrier + * @param c the callable + */ + public SyncThread(CyclicBarrier b, Callable c) { + super("SyncThread"); + setDaemon(true); + callable=c; + barrier=b; + latch=new CountDownLatch(1); + start(); + } + + /** + * Wait for the barrier, invoke the callable and capture the result or an + * exception. + */ + @Override + public void run() { + try { + barrier.await(); + rv=callable.call(); + } catch(Throwable t) { + throwable=t; + } + latch.countDown(); + } + + /** + * Get the result from the invocation. 
+ * + * @return the result + * @throws Throwable if an error occurred when evaluating the callable + */ + public T getResult() throws Throwable { + latch.await(); + if(throwable != null) { + throw throwable; + } + return rv; + } + + /** + * Get a collection of SyncThreads that all began as close to the + * same time as possible and have all completed. + * @param the result type of the SyncThread + * @param num the number of concurrent threads to execute + * @param callable the thing to call + * @return the completed SyncThreads + * @throws InterruptedException if we're interrupted during join + */ + public static Collection> getCompletedThreads( + int num, Callable callable) throws InterruptedException { + Collection> rv=new ArrayList>(num); + + CyclicBarrier barrier=new CyclicBarrier(num); + for(int i=0; i(barrier, callable)); + } + + for(SyncThread t : rv) { + t.join(); + } + + return rv; + } + + /** + * Get the distinct result count for the given callable at the given + * concurrency. 
+ * + * @param the type of the callable + * @param num the concurrency + * @param callable the callable to invoke + * @return the number of distinct (by identity) results found + * @throws Throwable if an exception occurred in one of the invocations + */ + public static int getDistinctResultCount(int num, Callable callable) + throws Throwable { + IdentityHashMap found=new IdentityHashMap(); + Collection> threads=getCompletedThreads(num, callable); + for(SyncThread s : threads) { + found.put(s.getResult(), new Object()); + } + return found.size(); + } +} diff --git a/src/main/java/net/spy/memcached/compat/log/AbstractLogger.java b/src/main/java/net/spy/memcached/compat/log/AbstractLogger.java new file mode 100644 index 000000000..f33e60be6 --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/AbstractLogger.java @@ -0,0 +1,225 @@ +// Copyright (c) 2002 SPY internetworking + +package net.spy.memcached.compat.log; + +/** + * Abstract implementation of Logger providing most of the common + * framework. + */ +public abstract class AbstractLogger implements Logger { + + private final String name; + + /** + * Instantiate the abstract logger. + */ + protected AbstractLogger(String nm) { + super(); + if(nm == null) { + throw new NullPointerException("Logger name may not be null."); + } + name=nm; + } + + /** + * Get the name of this logger. + */ + public String getName() { + return(name); + } + + /** + * Get the throwable from the last element of this array if it is + * Throwable, else null. + */ + public Throwable getThrowable(Object args[]) { + Throwable rv=null; + if(args.length > 0) { + if(args[args.length-1] instanceof Throwable) { + rv=(Throwable)args[args.length-1]; + } + } + return rv; + } + + /** + * True if debug is enabled for this logger. + * Default implementation always returns false + * + * @return true if debug messages would be displayed + */ + public abstract boolean isDebugEnabled(); + + /** + * True if debug is enabled for this logger. 
+ * Default implementation always returns false + * + * @return true if info messages would be displayed + */ + public abstract boolean isInfoEnabled(); + + /** + * Log a message at debug level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + public void debug(Object message, Throwable exception) { + log(Level.DEBUG, message, exception); + } + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + public void debug(String message, Object... args) { + if(isDebugEnabled()) { + debug(String.format(message, args), getThrowable(args)); + } + } + + /** + * Log a message at debug level. + * + * @param message the message to log + */ + public void debug(Object message) { + debug(message, null); + } + + /** + * Log a message at info level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + public void info(Object message, Throwable exception) { + log(Level.INFO, message, exception); + } + /** + * Log a formatted message at info level. + * + * @param message the message to log + * @param args the arguments for that message + */ + public void info(String message, Object... args) { + if(isInfoEnabled()) { + info(String.format(message, args), getThrowable(args)); + } + } + + /** + * Log a message at info level. + * + * @param message the message to log + */ + public void info(Object message) { + info(message, null); + } + + /** + * Log a message at warning level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + public void warn(Object message, Throwable exception) { + log(Level.WARN, message, exception); + } + /** + * Log a formatted message at debug level. 
+ * + * @param message the message to log + * @param args the arguments for that message + */ + public void warn(String message, Object... args) { + warn(String.format(message, args), getThrowable(args)); + } + + /** + * Log a message at warning level. + * + * @param message the message to log + */ + public void warn(Object message) { + warn(message, null); + } + + /** + * Log a message at error level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + public void error(Object message, Throwable exception) { + log(Level.ERROR, message, exception); + } + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + public void error(String message, Object... args) { + error(String.format(message, args), getThrowable(args)); + } + + /** + * Log a message at error level. + * + * @param message the message to log + */ + public void error(Object message) { + error(message, null); + } + + /** + * Log a message at fatal level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + public void fatal(Object message, Throwable exception) { + log(Level.FATAL, message, exception); + } + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + public void fatal(String message, Object... args) { + fatal(String.format(message, args), getThrowable(args)); + } + + /** + * Log a message at fatal level. + * + * @param message the message to log + */ + public void fatal(Object message) { + fatal(message, null); + } + + /** + * Log a message at the given level. 
+ * + * @param level the level + * @param message the message + */ + public void log(Level level, Object message) { + log(level, message, null); + } + + /** + * Subclasses should implement this method to determine what to do when + * a client wants to log at a particular level. + * + * @param level the level to log at (see the fields of this class) + * @param message the message to log + * @param e the exception that caused the message (or null) + */ + public abstract void log(Level level, Object message, Throwable e); + +} diff --git a/src/main/java/net/spy/memcached/compat/log/DefaultLogger.java b/src/main/java/net/spy/memcached/compat/log/DefaultLogger.java new file mode 100644 index 000000000..0a5f56851 --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/DefaultLogger.java @@ -0,0 +1,59 @@ +// Copyright (c) 2002 SPY internetworking + +package net.spy.memcached.compat.log; + +import java.text.SimpleDateFormat; +import java.util.Date; + +/** + * Default logger implementation. + * + * This logger is really primitive. It just logs everything to stderr if + * it's higher than INFO. + */ +public class DefaultLogger extends AbstractLogger { + + private final SimpleDateFormat df; + + /** + * Get an instance of DefaultLogger. + */ + public DefaultLogger(String name) { + super(name); + df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS"); + } + + /** + * False. + */ + @Override + public boolean isDebugEnabled() { + return(false); + } + + /** + * True. 
+ */ + @Override + public boolean isInfoEnabled() { + return(true); + } + + /** + * @see AbstractLogger + */ + @Override + public synchronized void log(Level level, Object message, Throwable e) { + if(level == Level.INFO + || level == Level.WARN + || level == Level.ERROR + || level == Level.FATAL) { + System.err.printf("%s %s %s: %s\n", + df.format(new Date()), level.name(), getName(), message); + if(e != null) { + e.printStackTrace(); + } + } + } + +} diff --git a/src/main/java/net/spy/memcached/compat/log/Level.java b/src/main/java/net/spy/memcached/compat/log/Level.java new file mode 100644 index 000000000..7c09fb50f --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/Level.java @@ -0,0 +1,39 @@ +// Copyright (c) 2002 Dustin Sallings + +package net.spy.memcached.compat.log; + +/** + * Levels for logging. + */ +public enum Level { + + /** + * Debug level. + */ + DEBUG, + /** + * Info level. + */ + INFO, + /** + * Warning level. + */ + WARN, + /** + * Error level. + */ + ERROR, + /** + * Fatal level. + */ + FATAL; + + /** + * Get a string representation of this level. + */ + @Override + public String toString() { + return("{LogLevel: " + name() + "}"); + } + +} diff --git a/src/main/java/net/spy/memcached/compat/log/Log4JLogger.java b/src/main/java/net/spy/memcached/compat/log/Log4JLogger.java new file mode 100644 index 000000000..ddaf082db --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/Log4JLogger.java @@ -0,0 +1,79 @@ +// Copyright (c) 2002 Dustin Sallings + +package net.spy.memcached.compat.log; + +/** + * Logging implementation using + * log4j. + */ +public class Log4JLogger extends AbstractLogger { + + // Can't really import this without confusion as there's another thing + // by this name in here. + private final org.apache.log4j.Logger l4jLogger; + + /** + * Get an instance of Log4JLogger. + */ + public Log4JLogger(String name) { + super(name); + + // Get the log4j logger instance. 
+ l4jLogger=org.apache.log4j.Logger.getLogger(name); + } + + /** + * True if the underlying logger would allow debug messages through. + */ + @Override + public boolean isDebugEnabled() { + return(l4jLogger.isDebugEnabled()); + } + + /** + * True if the underlying logger would allow info messages through. + */ + @Override + public boolean isInfoEnabled() { + return(l4jLogger.isInfoEnabled()); + } + + /** + * Wrapper around log4j. + * + * @param level net.spy.compat.log.AbstractLogger level. + * @param message object message + * @param e optional throwable + */ + @Override + public void log(Level level, Object message, Throwable e) { + org.apache.log4j.Level pLevel=org.apache.log4j.Level.DEBUG; + + switch(level == null ? Level.FATAL : level) { + case DEBUG: + pLevel=org.apache.log4j.Level.DEBUG; + break; + case INFO: + pLevel=org.apache.log4j.Level.INFO; + break; + case WARN: + pLevel=org.apache.log4j.Level.WARN; + break; + case ERROR: + pLevel=org.apache.log4j.Level.ERROR; + break; + case FATAL: + pLevel=org.apache.log4j.Level.FATAL; + break; + default: + // I don't know what this is, so consider it fatal + pLevel=org.apache.log4j.Level.FATAL; + l4jLogger.log("net.spy.compat.log.AbstractLogger", pLevel, + "Unhandled log level: " + level + + " for the following message", null); + } + + l4jLogger.log("net.spy.compat.log.AbstractLogger", pLevel, message, e); + } + +} diff --git a/src/main/java/net/spy/memcached/compat/log/Logger.java b/src/main/java/net/spy/memcached/compat/log/Logger.java new file mode 100644 index 000000000..4549a52a0 --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/Logger.java @@ -0,0 +1,155 @@ +// Copyright (c) 2002 SPY internetworking + +package net.spy.memcached.compat.log; + +/** + * Abstract mechanism for dealing with logs from various objects. + * + * Implementations are expected to have a constructor that takes a single + * String representing the name of the logging item, or an empty constructor. 
+ * + * @see LoggerFactory + */ +public interface Logger { + + /** + * Get the name of this logger. + */ + String getName(); + + /** + * True if debug is enabled for this logger. + * + * @return true if debug messages would be displayed + */ + boolean isDebugEnabled(); + + /** + * True if info is enabled for this logger. + * + * @return true if info messages would be displayed + */ + boolean isInfoEnabled(); + + /** + * Log a message at the specified level. + * + * @param level the level at which to log + * @param message the message to log + * @param exception an exception that caused the message + */ + void log(Level level, Object message, Throwable exception); + + /** + * Log a message at the specified level. + * + * @param level the level at which to log + * @param message the message to log + */ + void log(Level level, Object message); + + /** + * Log a message at debug level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + void debug(Object message, Throwable exception); + /** + * Log a message at debug level. + * + * @param message the message to log + */ + void debug(Object message); + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + void debug(String message, Object... args); + + /** + * Log a message at info level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + void info(Object message, Throwable exception); + /** + * Log a message at info level. + * + * @param message the message to log + */ + void info(Object message); + /** + * Log a formatted message at info level. + * + * @param message the message to log + * @param args the arguments for that message + */ + void info(String message, Object... args); + + /** + * Log a message at warning level. 
+ * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + void warn(Object message, Throwable exception); + /** + * Log a message at warning level. + * + * @param message the message to log + */ + void warn(Object message); + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + void warn(String message, Object... args); + + /** + * Log a message at error level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + void error(Object message, Throwable exception); + /** + * Log a message at error level. + * + * @param message the message to log + */ + void error(Object message); + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + void error(String message, Object... args); + + /** + * Log a message at fatal level. + * + * @param message the message to log + * @param exception the exception that caused the message to be generated + */ + void fatal(Object message, Throwable exception); + /** + * Log a message at fatal level. + * + * @param message the message to log + */ + void fatal(Object message); + /** + * Log a formatted message at debug level. + * + * @param message the message to log + * @param args the arguments for that message + */ + void fatal(String message, Object... 
args); +} diff --git a/src/main/java/net/spy/memcached/compat/log/LoggerFactory.java b/src/main/java/net/spy/memcached/compat/log/LoggerFactory.java new file mode 100644 index 000000000..ca5a8d353 --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/LoggerFactory.java @@ -0,0 +1,162 @@ +// Copyright (c) 2002 SPY internetworking + +package net.spy.memcached.compat.log; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +/** + * Factory to get logger instances. + * + * The system property net.spy.compat.log.LoggerImpl + * should point to an implementation of net.spy.compat.log.Logger to + * use. + * + *

+ * Depending on how and where this was compiled, a sun logger (jdk 1.4) + * and/or log4j logger + * implementation may be included. Both are included with the official + * distribution. + *

+ * + * @see AbstractLogger + */ +public final class LoggerFactory extends Object { + + private static LoggerFactory instance=null; + + private final ConcurrentMap instances; + private Constructor instanceConstructor; + + /** + * Get an instance of LoggerFactory. + */ + private LoggerFactory() { + super(); + + instances=new ConcurrentHashMap(); + } + + private static void init() { + if(instance == null) { + instance=new LoggerFactory(); + } + } + + /** + * Get a logger by class. + * + * @param clazz the class for which we want the logger. + * @return a Logger instance + */ + public static Logger getLogger(Class clazz) { + return(getLogger(clazz.getName())); + } + + /** + * Get a logger by name. + * + * @param name the name for which we want the logger + * @return a Logger instance + */ + public static Logger getLogger(String name) { + if(name == null) { + throw new NullPointerException("Logger name may not be null."); + } + init(); + return(instance.internalGetLogger(name)); + } + + // Get an instance of Logger from internal mechanisms. + private Logger internalGetLogger(String name) { + assert name != null : "Name was null"; + Logger rv=instances.get(name); + + if (rv==null) { + Logger newLogger=null; + try { + newLogger=getNewInstance(name); + } catch(Exception e) { + throw new RuntimeException("Problem getting logger", e); + } + Logger tmp=instances.putIfAbsent(name, newLogger); + // Return either the new logger we've just made, or one that was + // created while we were waiting + rv = tmp == null ? 
newLogger : tmp; + } + + return(rv); + + } + + private Logger getNewInstance(String name) + throws InstantiationException, IllegalAccessException, + IllegalArgumentException, InvocationTargetException { + + if(instanceConstructor==null) { + getConstructor(); + } + Object[] args={name}; + Logger rv=instanceConstructor.newInstance(args); + + return (rv); + } + + // Find the appropriate constructor + @SuppressWarnings("unchecked") + private void getConstructor() { + Class c=DefaultLogger.class; + String className=System.getProperty("net.spy.log.LoggerImpl"); + + if(className!=null) { + try { + c=(Class) Class.forName(className); + } catch(NoClassDefFoundError e) { + System.err.println("Warning: " + className + + " not found while initializing" + + " net.spy.compat.log.LoggerFactory"); + e.printStackTrace(); + c=DefaultLogger.class; + } catch(ClassNotFoundException e) { + System.err.println("Warning: " + className + + " not found while initializing" + + " net.spy.compat.log.LoggerFactory"); + e.printStackTrace(); + c=DefaultLogger.class; + } + } + + // Find the best constructor + try { + // Try to find a constructor that takes a single string + Class[] args={String.class}; + instanceConstructor=c.getConstructor(args); + } catch(NoSuchMethodException e) { + try { + // Try to find an empty constructor + Class[] args={}; + instanceConstructor=c.getConstructor(args); + } catch(NoSuchMethodException e2) { + System.err.println("Warning: " + className + + " has no appropriate constructor, using defaults."); + + // Try to find a constructor that takes a single string + try { + Class[] args={String.class}; + instanceConstructor= + DefaultLogger.class.getConstructor(args); + } catch(NoSuchMethodException e3) { + // This shouldn't happen. 
+ throw new NoSuchMethodError( + "There used to be a constructor that takes a single " + + "String on " + + DefaultLogger.class + ", but I can't " + + "find one now."); + } // SOL + } // No empty constructor + } // No constructor that takes a string + } // getConstructor + +} diff --git a/src/main/java/net/spy/memcached/compat/log/SunLogger.java b/src/main/java/net/spy/memcached/compat/log/SunLogger.java new file mode 100644 index 000000000..73200ad4d --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/log/SunLogger.java @@ -0,0 +1,110 @@ +// Copyright (c) 2002 Dustin Sallings + +package net.spy.memcached.compat.log; + +/** + * Logging implementation using the sun logger. + */ +public class SunLogger extends AbstractLogger { + + // Can't really import this without confusion as there's another thing + // by this name in here. + private final java.util.logging.Logger sunLogger; + + /** + * Get an instance of SunLogger. + */ + public SunLogger(String name) { + super(name); + + // Get the sun logger instance. + sunLogger=java.util.logging.Logger.getLogger(name); + } + + /** + * True if the underlying logger would allow Level.FINE through. + */ + @Override + public boolean isDebugEnabled() { + return(sunLogger.isLoggable(java.util.logging.Level.FINE)); + } + + /** + * True if the underlying logger would allow Level.INFO through. + */ + @Override + public boolean isInfoEnabled() { + return(sunLogger.isLoggable(java.util.logging.Level.INFO)); + } + + /** + * Wrapper around sun logger. + * + * @param level net.spy.compat.log.AbstractLogger level. + * @param message object message + * @param e optional throwable + */ + @Override + public void log(Level level, Object message, Throwable e) { + java.util.logging.Level sLevel=java.util.logging.Level.SEVERE; + + switch(level == null ? 
Level.FATAL : level) { + case DEBUG: + sLevel=java.util.logging.Level.FINE; + break; + case INFO: + sLevel=java.util.logging.Level.INFO; + break; + case WARN: + sLevel=java.util.logging.Level.WARNING; + break; + case ERROR: + sLevel=java.util.logging.Level.SEVERE; + break; + case FATAL: + sLevel=java.util.logging.Level.SEVERE; + break; + default: + // I don't know what this is, so consider it fatal + sLevel=java.util.logging.Level.SEVERE; + sunLogger.log(sLevel, "Unhandled log level: " + level + + " for the following message"); + } + + // Figure out who was logging. + Throwable t=new Throwable(); + StackTraceElement[] ste=t.getStackTrace(); + StackTraceElement logRequestor=null; + String alclass=AbstractLogger.class.getName(); + for(int i=0; i + + + + + Logging Abstractions + + + +

Logging Abstractions

+ +

+ Logging is performed through these logging abstractions that + were pulled from another project and duplicated here to avoid a + dependency (hence compat). +

+ +

+ For more information on logging, see the + logging + page on the project wiki. +

+ + + diff --git a/src/main/java/net/spy/memcached/compat/package.html b/src/main/java/net/spy/memcached/compat/package.html new file mode 100644 index 000000000..c68a2af60 --- /dev/null +++ b/src/main/java/net/spy/memcached/compat/package.html @@ -0,0 +1,19 @@ + + + + + + spy.jar compatibility classes + + + +

spy.jar compatibility classes

+ +

+ This package exists to remove the spy.jar dependency by copying + some of the critical pieces into the client base itself. +

+ + + diff --git a/src/main/java/net/spy/memcached/internal/BTreeStoreAndGetFuture.java b/src/main/java/net/spy/memcached/internal/BTreeStoreAndGetFuture.java new file mode 100644 index 000000000..c0e650c2a --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/BTreeStoreAndGetFuture.java @@ -0,0 +1,55 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import net.spy.memcached.collection.Element; +import net.spy.memcached.ops.CollectionOperationStatus; + +/** + * Future object that contains an b+tree element object + * @param + */ +public class BTreeStoreAndGetFuture extends CollectionFuture { + + private Element element; + + public BTreeStoreAndGetFuture(CountDownLatch l, long opTimeout) { + this(l, new AtomicReference(null), opTimeout); + } + + public BTreeStoreAndGetFuture(CountDownLatch l, AtomicReference oref, + long opTimeout) { + super(l, oref, opTimeout); + } + + public void set(T o, CollectionOperationStatus status) { + objRef.set(o); + opStatus = status; + } + + public Element getElement() { + return element; + } + + public void setElement(Element element) { + this.element = element; + } + +} diff --git a/src/main/java/net/spy/memcached/internal/BasicThreadFactory.java 
b/src/main/java/net/spy/memcached/internal/BasicThreadFactory.java new file mode 100644 index 000000000..2e7a968d7 --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/BasicThreadFactory.java @@ -0,0 +1,27 @@ +package net.spy.memcached.internal; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Simple thread factory that can set daemon status on threads and give them names. + */ +public class BasicThreadFactory implements ThreadFactory { + + private static final AtomicInteger poolNumber = new AtomicInteger(1); + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + private final boolean daemon; + + public BasicThreadFactory(String name, boolean daemon) { + this.namePrefix = name + "-" + poolNumber.getAndIncrement() + "-"; + this.daemon = daemon; + } + + public Thread newThread(Runnable r) { + Thread t = new Thread(r, namePrefix + threadNumber.getAndIncrement()); + t.setDaemon(daemon); + return t; + } + +} diff --git a/src/main/java/net/spy/memcached/internal/BulkFuture.java b/src/main/java/net/spy/memcached/internal/BulkFuture.java new file mode 100644 index 000000000..b1c1e1232 --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/BulkFuture.java @@ -0,0 +1,47 @@ +package net.spy.memcached.internal; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +/** + * Additional flexibility for asyncGetBulk + * + *

+ * This interface is now returned from all asyncGetBulk + * methods. Unlike {@link #get(long, TimeUnit)}, + * {@link #getSome(long, TimeUnit)} does not throw + * CheckedOperationTimeoutException, thus allowing retrieval + * of partial results after timeout occurs. This behavior is + * especially useful in case of large multi gets. + *

+ * + * @author boris.partensky@gmail.com + * @param + * + */ +public interface BulkFuture extends Future { + + /** + * @return true if timeout was reached, false otherwise + */ + public boolean isTimeout(); + + /** + * Wait for the operation to complete and return results + * + * If operation could not complete within specified + * timeout, partial result is returned. Otherwise, the + * behavior is identical to {@link #get(long, TimeUnit)} + * + * + * @param timeout + * @param unit + * @return + * @throws InterruptedException + * @throws ExecutionException + */ + public V getSome(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException; + +} diff --git a/src/main/java/net/spy/memcached/internal/BulkGetFuture.java b/src/main/java/net/spy/memcached/internal/BulkGetFuture.java new file mode 100644 index 000000000..d6e5461bc --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/BulkGetFuture.java @@ -0,0 +1,197 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.internal; + +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.compat.log.LoggerFactory; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.plugin.LocalCacheManager; + +/** + * Future for handling results from bulk gets. + * + * Not intended for general use. + * + * @param types of objects returned from the GET + */ +public class BulkGetFuture implements BulkFuture> { + private final Map> rvMap; + private final Collection ops; + private final CountDownLatch latch; + private boolean cancelled = false; + private boolean timeout = false; + + // FIXME right position? 
+ private LocalCacheManager localCacheManager; + + public BulkGetFuture(Map> m, + Collection getOps, CountDownLatch l) { + super(); + rvMap = m; + ops = getOps; + latch = l; + } + + public BulkGetFuture(Map> m, + Collection getOps, CountDownLatch l, + LocalCacheManager lcm) { + super(); + rvMap = m; + ops = getOps; + latch = l; + localCacheManager = lcm; + } + + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + rv |= op.getState() == OperationState.WRITING; + op.cancel(); + } + for (Future v : rvMap.values()) { + v.cancel(ign); + } + cancelled = true; + return rv; + } + + public Map get() throws InterruptedException, ExecutionException { + try { + return get(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new RuntimeException("Timed out waiting forever", e); + } + } + + /* + * (non-Javadoc) + * + * @see net.spy.memcached.internal.BulkFuture#getSome(long, + * java.util.concurrent.TimeUnit) + */ + public Map getSome(long to, TimeUnit unit) + throws InterruptedException, ExecutionException { + Collection timedoutOps = new HashSet(); + Map ret = internalGet(to, unit, timedoutOps); + if (timedoutOps.size() > 0) { + timeout = true; + LoggerFactory.getLogger(getClass()).warn( + new CheckedOperationTimeoutException( + "Operation timed out: ", timedoutOps).getMessage()); + } + return ret; + + } + + /* + * get all or nothing: timeout exception is thrown if all the data could not + * be retrieved + * + * @see java.util.concurrent.Future#get(long, java.util.concurrent.TimeUnit) + */ + public Map get(long to, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + Collection timedoutOps = new HashSet(); + Map ret = internalGet(to, unit, timedoutOps); + if (timedoutOps.size() > 0) { + this.timeout = true; + throw new CheckedOperationTimeoutException("Operation timed out.", + timedoutOps); + } + return ret; + } + + /** + * refactored code common to both get(long, TimeUnit) and 
getSome(long, + * TimeUnit) + * + * @param to + * @param unit + * @param timedoutOps + * @return + * @throws InterruptedException + * @throws ExecutionException + */ + private Map internalGet(long to, TimeUnit unit, + Collection timedoutOps) throws InterruptedException, + ExecutionException { + if (!latch.await(to, unit)) { + for (Operation op : ops) { + if (op.getState() != OperationState.COMPLETE) { + MemcachedConnection.opTimedOut(op); + timedoutOps.add(op); + } else { + MemcachedConnection.opSucceeded(op); + } + } + } + for (Operation op : ops) { + if (op.isCancelled()) { + throw new ExecutionException(new RuntimeException("Cancelled")); + } + if (op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + Map m = new HashMap(); + for (Map.Entry> me : rvMap.entrySet()) { + String key = me.getKey(); + Future future = me.getValue(); + T value = future.get(); + + // put the key into the result map. + m.put(key, value); + + // cache the key locally + if (localCacheManager != null) { + // iff it is from the remote cache. + if (!(future instanceof LocalCacheManager.Task)) { + localCacheManager.put(key, value); + } + } + } + return m; + } + + public boolean isCancelled() { + return cancelled; + } + + public boolean isDone() { + return latch.getCount() == 0; + } + + /* + * set to true if timeout was reached. + * + * @see net.spy.memcached.internal.BulkFuture#isTimeout() + */ + public boolean isTimeout() { + return timeout; + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/internal/CheckedOperationTimeoutException.java b/src/main/java/net/spy/memcached/internal/CheckedOperationTimeoutException.java new file mode 100644 index 000000000..d8b9c9a76 --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/CheckedOperationTimeoutException.java @@ -0,0 +1,85 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.MemcachedNode; +import net.spy.memcached.ops.Operation; + +/** + * Timeout exception that tracks the original operation. + */ +public class CheckedOperationTimeoutException extends TimeoutException { + + private final Collection operations; + + /** + * Construct a CheckedOperationTimeoutException with the given message + * and operation. + * + * @param message the message + * @param op the operation that timed out + */ + public CheckedOperationTimeoutException(String message, Operation op) { + this(message, Collections.singleton(op)); + } + + public CheckedOperationTimeoutException(String message, + Collection ops) { + super(createMessage(message, ops)); + operations = ops; + } + + private static String createMessage(String message, + Collection ops) { + StringBuilder rv = new StringBuilder(message); + rv.append(" - failing node"); + rv.append(ops.size() == 1 ? ": " : "s: "); + boolean first = true; + for(Operation op : ops) { + if(first) { + first = false; + } else { + rv.append(", "); + } + MemcachedNode node = op == null ? null : op.getHandlingNode(); + rv.append(node == null ? 
"" : node.getSocketAddress()); + if (op != null) { + rv.append(" [").append(op.getState()).append("]"); + } + if (node != null) { + rv.append(" [").append(node.getStatus()).append("]"); + } +// if (op != null && op.getBuffer() != null) { +// rv.append(" [") +// .append(new String(op.getBuffer().array()).replace( +// "\r\n", "\\n")).append("]"); +// } + } + return rv.toString(); + } + + /** + * Get the operation that timed out. + */ + public Collection getOperations() { + return operations; + } +} diff --git a/src/main/java/net/spy/memcached/internal/CollectionFuture.java b/src/main/java/net/spy/memcached/internal/CollectionFuture.java new file mode 100644 index 000000000..e3a9b98ed --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/CollectionFuture.java @@ -0,0 +1,120 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; + +/** + * Managed future for collection operations. 
+ * + * Not intended for general use. + * + * @param Type of object returned from this future. + */ +public class CollectionFuture implements Future { + + protected final CountDownLatch latch; + protected final AtomicReference objRef; + protected final long timeout; + protected Operation op; + protected CollectionOperationStatus opStatus; + + public CollectionFuture(CountDownLatch l, long opTimeout) { + this(l, new AtomicReference(null), opTimeout); + } + + public CollectionFuture(CountDownLatch l, AtomicReference oref, + long opTimeout) { + super(); + latch=l; + objRef=oref; + timeout = opTimeout; + } + + public boolean cancel(boolean ign) { + assert op != null : "No operation"; + op.cancel(); + // This isn't exactly correct, but it's close enough. If we're in + // a writing state, we *probably* haven't started. + return op.getState() == OperationState.WRITING; + } + + public T get() throws InterruptedException, ExecutionException { + try { + return get(timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new RuntimeException( + "Timed out waiting for operation. >" + timeout, e); + } + } + + public T get(long duration, TimeUnit units) + throws InterruptedException, TimeoutException, ExecutionException { + if(!latch.await(duration, units)) { + // whenever timeout occurs, continuous timeout counter will increase by 1. + MemcachedConnection.opTimedOut(op); + throw new CheckedOperationTimeoutException( + "Timed out waiting for operation. 
>" + duration, op); + } else { + // continuous timeout counter will be reset + MemcachedConnection.opSucceeded(op); + } + if(op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + if(isCancelled()) { + throw new ExecutionException(new RuntimeException("Cancelled")); + } + + return objRef.get(); + } + + public void set(T o, CollectionOperationStatus status) { + objRef.set(o); + opStatus = status; + } + + public void setOperation(Operation to) { + op=to; + } + + public boolean isCancelled() { + assert op != null : "No operation"; + return op.isCancelled(); + } + + public boolean isDone() { + assert op != null : "No operation"; + return latch.getCount() == 0 || + op.isCancelled() || op.getState() == OperationState.COMPLETE; + } + + public CollectionOperationStatus getOperationStatus() { + return (op.getState() == OperationState.COMPLETE)? opStatus : null; + } + +} diff --git a/src/main/java/net/spy/memcached/internal/CollectionGetBulkFuture.java b/src/main/java/net/spy/memcached/internal/CollectionGetBulkFuture.java new file mode 100644 index 000000000..45df979df --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/CollectionGetBulkFuture.java @@ -0,0 +1,115 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.internal; + +import java.util.Collection; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +public class CollectionGetBulkFuture implements Future { + + private final Collection ops; + private final long timeout; + private final CountDownLatch latch; + private final T result; + + public CollectionGetBulkFuture(CountDownLatch latch, Collection ops, T result, long timeout) { + this.latch = latch; + this.ops = ops; + this.result = result; + this.timeout = timeout; + } + + @Override + public T get() throws InterruptedException, ExecutionException { + try { + return get(timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new RuntimeException("Timed out waiting for smget operation", e); + } + } + + @Override + public T get(long duration, TimeUnit units) throws InterruptedException, TimeoutException, ExecutionException { + if (!latch.await(duration, units)) { + for (Operation op : ops) { + MemcachedConnection.opTimedOut(op); + } + throw new CheckedOperationTimeoutException("Timed out waiting for b+tree get bulk operation", ops); + } else { + for (Operation op : ops) { + MemcachedConnection.opSucceeded(op); + } + } + + for (Operation op : ops) { + if (op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + } + + if (isCancelled()) { + throw new ExecutionException(new RuntimeException("Cancelled")); + } + + return result; + } + + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == 
OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { + boolean rv = false; + for (Operation op : ops) { + rv |= op.isCancelled(); + } + return rv; + } + + @Override + public boolean isDone() { + boolean rv = true; + for (Operation op : ops) { + rv &= op.getState() == OperationState.COMPLETE; + } + return rv || isCancelled(); + } + + public CollectionOperationStatus getOperationStatus() { + if (isCancelled()) + return new CollectionOperationStatus(new OperationStatus(false, "CANCELED")); + + return new CollectionOperationStatus(new OperationStatus(true, "END")); + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/internal/GetFuture.java b/src/main/java/net/spy/memcached/internal/GetFuture.java new file mode 100644 index 000000000..de750544a --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/GetFuture.java @@ -0,0 +1,56 @@ +package net.spy.memcached.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.ops.Operation; + +/** + * Future returned for GET operations. + * + * Not intended for general use. + * + * @param Type of object returned from the get + */ +public class GetFuture implements Future { + + private final OperationFuture> rv; + + public GetFuture(CountDownLatch l, long opTimeout) { + this.rv = new OperationFuture>(l, opTimeout); + } + + public boolean cancel(boolean ign) { + return rv.cancel(ign); + } + + public T get() throws InterruptedException, ExecutionException { + Future v = rv.get(); + return v == null ? null : v.get(); + } + + public T get(long duration, TimeUnit units) + throws InterruptedException, TimeoutException, ExecutionException { + Future v = rv.get(duration, units); + return v == null ? 
null : v.get(); + } + + public void set(Future d) { + rv.set(d); + } + + public void setOperation(Operation to) { + rv.setOperation(to); + } + + public boolean isCancelled() { + return rv.isCancelled(); + } + + public boolean isDone() { + return rv.isDone(); + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/internal/ImmediateFuture.java b/src/main/java/net/spy/memcached/internal/ImmediateFuture.java new file mode 100644 index 000000000..399dca98e --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/ImmediateFuture.java @@ -0,0 +1,56 @@ +/** + * + */ +package net.spy.memcached.internal; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * A future that fires immediately. + */ +public class ImmediateFuture implements Future { + private final Boolean value; + private final ExecutionException exception; + + public ImmediateFuture(Boolean returnValue) { + value = returnValue; + exception = null; + } + + public ImmediateFuture(Exception e) { + value = null; + exception = new ExecutionException(e); + } + + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + public Boolean get() throws InterruptedException, ExecutionException { + if(exception != null) { + throw exception; + } + return value; + } + + public Boolean get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, + TimeoutException { + if(exception != null) { + throw exception; + } + return value; + } + + public boolean isCancelled() { + return false; + } + + public boolean isDone() { + return true; + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/internal/OperationFuture.java b/src/main/java/net/spy/memcached/internal/OperationFuture.java new file mode 100644 index 000000000..6541f7d82 --- /dev/null +++ 
b/src/main/java/net/spy/memcached/internal/OperationFuture.java @@ -0,0 +1,112 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import net.spy.memcached.MemcachedConnection; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; + +/** + * Managed future for operations. + * + * Not intended for general use. + * + * @param Type of object returned from this future. + */ +public class OperationFuture implements Future { + + private final CountDownLatch latch; + private final AtomicReference objRef; + private final long timeout; + private Operation op; + + public OperationFuture(CountDownLatch l, long opTimeout) { + this(l, new AtomicReference(null), opTimeout); + } + + public OperationFuture(CountDownLatch l, AtomicReference oref, + long opTimeout) { + super(); + latch=l; + objRef=oref; + timeout = opTimeout; + } + + public boolean cancel(boolean ign) { + assert op != null : "No operation"; + op.cancel(); + // This isn't exactly correct, but it's close enough. If we're in + // a writing state, we *probably* haven't started. 
+ return op.getState() == OperationState.WRITING; + } + + public T get() throws InterruptedException, ExecutionException { + try { + return get(timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new RuntimeException( + "Timed out waiting for operation. >" + timeout, e); + } + } + + public T get(long duration, TimeUnit units) + throws InterruptedException, TimeoutException, ExecutionException { + if(!latch.await(duration, units)) { + // whenever timeout occurs, continuous timeout counter will increase by 1. + MemcachedConnection.opTimedOut(op); + throw new CheckedOperationTimeoutException( + "Timed out waiting for operation. >" + duration, op); + } else { + // continuous timeout counter will be reset + MemcachedConnection.opSucceeded(op); + } + if(op != null && op.hasErrored()) { + throw new ExecutionException(op.getException()); + } + if(isCancelled()) { + throw new ExecutionException(new RuntimeException("Cancelled")); + } + + return objRef.get(); + } + + public void set(T o) { + objRef.set(o); + } + + public void setOperation(Operation to) { + op=to; + } + + public boolean isCancelled() { + assert op != null : "No operation"; + return op.isCancelled(); + } + + public boolean isDone() { + assert op != null : "No operation"; + return latch.getCount() == 0 || + op.isCancelled() || op.getState() == OperationState.COMPLETE; + } +} diff --git a/src/main/java/net/spy/memcached/internal/ReconnectJob.java b/src/main/java/net/spy/memcached/internal/ReconnectJob.java new file mode 100644 index 000000000..cf30a42ff --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/ReconnectJob.java @@ -0,0 +1,51 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import net.spy.memcached.ArcusClient; +import net.spy.memcached.ConnectionFactoryBuilder; + +public class ReconnectJob { + + String hostPort; + String serviceCode; + ConnectionFactoryBuilder cfb; + ArcusClient[] client; + + public ReconnectJob(String hostPort, String serviceCode, + ConnectionFactoryBuilder cfb, ArcusClient[] client) { + super(); + this.hostPort = hostPort; + this.serviceCode = serviceCode; + this.cfb = cfb; + this.client = client; + } + + public String getHostPort() { + return hostPort; + } + public String getServiceCode() { + return serviceCode; + } + public ConnectionFactoryBuilder getCfb() { + return cfb; + } + public ArcusClient[] getClient() { + return client; + } + +} diff --git a/src/main/java/net/spy/memcached/internal/SMGetFuture.java b/src/main/java/net/spy/memcached/internal/SMGetFuture.java new file mode 100644 index 000000000..cbf0453cd --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/SMGetFuture.java @@ -0,0 +1,80 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; + +public abstract class SMGetFuture implements Future { + + private final Collection ops; + private final long timeout; + + public SMGetFuture(Collection ops, long timeout) { + this.ops = ops; + this.timeout = timeout; + } + + @Override + public T get() throws InterruptedException, ExecutionException { + try { + return get(timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + throw new RuntimeException("Timed out waiting for smget operation", e); + } + } + + @Override + public boolean cancel(boolean ign) { + boolean rv = false; + for (Operation op : ops) { + op.cancel(); + rv |= op.getState() == OperationState.WRITING; + } + return rv; + } + + @Override + public boolean isCancelled() { + boolean rv = false; + for (Operation op : ops) { + rv |= op.isCancelled(); + } + return rv; + } + + @Override + public boolean isDone() { + boolean rv = true; + for (Operation op : ops) { + rv &= op.getState() == OperationState.COMPLETE; + } + return rv || isCancelled(); + } + + public abstract List getMissedKeyList(); + + public abstract CollectionOperationStatus getOperationStatus(); +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/internal/SingleElementInfiniteIterator.java b/src/main/java/net/spy/memcached/internal/SingleElementInfiniteIterator.java new file mode 100644 index 000000000..12fa938cf --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/SingleElementInfiniteIterator.java @@ -0,0 +1,36 @@ +package 
net.spy.memcached.internal; + +import java.lang.UnsupportedOperationException; +import java.util.Iterator; + +/** + * An iterator that returns a single element for as many elements as + * are needed from the iterator; in other words, #hasNext() never + * returns false. + */ +public class SingleElementInfiniteIterator + implements Iterator { + private final T element; + + /** + * Construct an iterator that returns the input element an + * infinite number of times. + * + * @param element the element that #next() should return + */ + public SingleElementInfiniteIterator(T element) { + this.element = element; + } + + public boolean hasNext() { + return true; + } + + public T next() { + return element; + } + + public void remove() { + throw new UnsupportedOperationException("Cannot remove from this iterator."); + } +} diff --git a/src/main/java/net/spy/memcached/internal/package.html b/src/main/java/net/spy/memcached/internal/package.html new file mode 100644 index 000000000..9b289bcd6 --- /dev/null +++ b/src/main/java/net/spy/memcached/internal/package.html @@ -0,0 +1,14 @@ + + + + + + Internal utilities. + + + +

Internal utilities.

+ + + diff --git a/src/main/java/net/spy/memcached/ops/ArrayOperationQueueFactory.java b/src/main/java/net/spy/memcached/ops/ArrayOperationQueueFactory.java new file mode 100644 index 000000000..7944f8fc8 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/ArrayOperationQueueFactory.java @@ -0,0 +1,31 @@ +package net.spy.memcached.ops; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; + +/** + * OperationQueueFactory that uses an ArrayBlockingQueue. + */ +public class ArrayOperationQueueFactory implements OperationQueueFactory { + + private final int capacity; + + /** + * Create an ArrayOperationQueueFactory that creates blocking queues with + * the given capacity. + * + * @param cap maximum size of a queue produced by this factory + */ + public ArrayOperationQueueFactory(int cap) { + super(); + capacity = cap; + } + + /* (non-Javadoc) + * @see net.spy.memcached.ops.OperationQueueFactory#create() + */ + public BlockingQueue create() { + return new ArrayBlockingQueue(capacity); + } + +} diff --git a/src/main/java/net/spy/memcached/ops/BTreeFindPositionOperation.java b/src/main/java/net/spy/memcached/ops/BTreeFindPositionOperation.java new file mode 100644 index 000000000..2545b0511 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/BTreeFindPositionOperation.java @@ -0,0 +1,29 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
package net.spy.memcached.ops;

import net.spy.memcached.collection.BTreeFindPosition;

/**
 * Operation that locates the position of a given bkey within a b+tree
 * collection.
 */
public interface BTreeFindPositionOperation extends KeyedOperation {

    /**
     * Get the find-position request this operation carries.
     */
    BTreeFindPosition getGet();

    /**
     * Operation callback for the find-position request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called with the position of the requested element as reported by
         * the server.
         *
         * @param position the element's position within the b+tree
         */
        void gotData(int position);
    }

}
+ */ +package net.spy.memcached.ops; + +public interface BTreeGetBulkOperation extends KeyedOperation { + interface Callback extends OperationCallback { + void gotElement(String key, K subkey, int flags, byte[] eflag, byte[] data); + void gotKey(String key, int elementCount, OperationStatus status); + } +} diff --git a/src/main/java/net/spy/memcached/ops/BTreeGetByPositionOperation.java b/src/main/java/net/spy/memcached/ops/BTreeGetByPositionOperation.java new file mode 100644 index 000000000..af37188e2 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/BTreeGetByPositionOperation.java @@ -0,0 +1,30 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package net.spy.memcached.ops;

import net.spy.memcached.collection.BKeyObject;
import net.spy.memcached.collection.BTreeGetByPosition;

/**
 * Operation that retrieves b+tree elements by position rather than by bkey.
 */
public interface BTreeGetByPositionOperation extends KeyedOperation {

    /**
     * Get the get-by-position request this operation carries.
     */
    BTreeGetByPosition getGet();

    /**
     * Operation callback for the get-by-position request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called once per element returned.
         *
         * @param key   key of the b+tree
         * @param flags item flags stored with the value
         * @param pos   the element's position within the b+tree
         * @param bkey  the element's bkey (long or byte-array form)
         * @param eflag the element flag bytes (presumably null when absent
         *              -- TODO confirm)
         * @param data  the element's value bytes
         */
        void gotData(String key, int flags, int pos, BKeyObject bkey, byte[] eflag, byte[] data);
    }

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.BKeyObject;
import net.spy.memcached.collection.BTreeStoreAndGet;

/**
 * Operation that stores an element into a b+tree and returns element data
 * in the same round trip.
 */
public interface BTreeStoreAndGetOperation extends KeyedOperation {

    /**
     * Get the store-and-get request this operation carries.
     */
    BTreeStoreAndGet getGet();

    /**
     * Operation callback for the store-and-get request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called with the element data returned by the server.
         *
         * @param key         key of the b+tree
         * @param flags       item flags stored with the value
         * @param bkeyObject  the element's bkey (long or byte-array form)
         * @param elementFlag the element flag bytes (presumably null when
         *                    absent -- TODO confirm)
         * @param data        the element's value bytes
         */
        void gotData(String key, int flags, BKeyObject bkeyObject, byte[] elementFlag, byte[] data);
    }

}
package net.spy.memcached.ops;

import java.util.ArrayList;
import java.util.Collection;

import net.spy.memcached.OperationFactory;

/**
 * Base class for operation factories.
 *
 * <p>
 * There is little common code between OperationFactory implementations, but
 * some exists, and is complicated and likely to cause problems.
 * </p>
 *
 * NOTE(review): stripped generic parameters restored throughout
 * ({@code Collection<Operation>}, {@code Collection<String>},
 * {@code Collection<? extends Operation>}); also fixed the missing space in
 * the first assert's message ("the WRITINGstate?").
 */
public abstract class BaseOperationFactory implements OperationFactory {

    /** Return the single key of a one-key operation. */
    private String first(Collection<String> keys) {
        return keys.iterator().next();
    }

    /**
     * Clone an in-flight operation so it can be reissued (e.g. redistributed
     * after a node failure). Multi-key gets fan out through
     * {@link #cloneGet}; every other supported type is single-key and is
     * rebuilt from its own accessors.
     *
     * @param op the operation to clone; asserted to be in the WRITING state,
     *           not cancelled and not errored
     * @return the freshly built operation(s)
     */
    public Collection<Operation> clone(KeyedOperation op) {
        assert op.getState() == OperationState.WRITING
            : "Who passed me an operation in the " + op.getState() + " state?";
        assert !op.isCancelled() : "Attempted to clone a canceled op";
        assert !op.hasErrored() : "Attempted to clone an errored op";

        Collection<Operation> rv = new ArrayList<Operation>(
            op.getKeys().size());
        if (op instanceof GetOperation) {
            rv.addAll(cloneGet(op));
        } else if (op instanceof GetsOperation) {
            GetsOperation.Callback callback =
                (GetsOperation.Callback) op.getCallback();
            // Gets fans out to one operation per key, sharing the callback.
            for (String k : op.getKeys()) {
                rv.add(gets(k, callback));
            }
        } else if (op instanceof CASOperation) {
            CASOperation cop = (CASOperation) op;
            rv.add(cas(cop.getStoreType(), first(op.getKeys()),
                cop.getCasValue(), cop.getFlags(), cop.getExpiration(),
                cop.getBytes(), cop.getCallback()));
        } else if (op instanceof DeleteOperation) {
            rv.add(delete(first(op.getKeys()), op.getCallback()));
        } else if (op instanceof MutatorOperation) {
            MutatorOperation mo = (MutatorOperation) op;
            rv.add(mutate(mo.getType(), first(op.getKeys()),
                mo.getBy(), mo.getDefault(), mo.getExpiration(),
                op.getCallback()));
        } else if (op instanceof StoreOperation) {
            StoreOperation so = (StoreOperation) op;
            rv.add(store(so.getStoreType(), first(op.getKeys()), so.getFlags(),
                so.getExpiration(), so.getData(), op.getCallback()));
        } else if (op instanceof ConcatenationOperation) {
            ConcatenationOperation c = (ConcatenationOperation) op;
            rv.add(cat(c.getStoreType(), c.getCasValue(), first(op.getKeys()),
                c.getData(), c.getCallback()));
        } else if (op instanceof SetAttrOperation) {
            SetAttrOperation c = (SetAttrOperation) op;
            rv.add(setAttr(first(c.getKeys()), c.getAttributes(),
                c.getCallback()));
        } else if (op instanceof GetAttrOperation) {
            GetAttrOperation c = (GetAttrOperation) op;
            rv.add(getAttr(first(c.getKeys()),
                (GetAttrOperation.Callback) c.getCallback()));
        } else if (op instanceof CollectionStoreOperation) {
            CollectionStoreOperation c = (CollectionStoreOperation) op;
            rv.add(collectionStore(first(c.getKeys()), c.getSubKey(),
                c.getStore(), c.getData(), c.getCallback()));
        } else if (op instanceof CollectionGetOperation) {
            CollectionGetOperation c = (CollectionGetOperation) op;
            rv.add(collectionGet(first(c.getKeys()), c.getGet(),
                (CollectionGetOperation.Callback) c.getCallback()));
        } else if (op instanceof CollectionDeleteOperation) {
            CollectionDeleteOperation c = (CollectionDeleteOperation) op;
            rv.add(collectionDelete(first(c.getKeys()), c.getDelete(),
                c.getCallback()));
        } else if (op instanceof CollectionExistOperation) {
            CollectionExistOperation c = (CollectionExistOperation) op;
            rv.add(collectionExist(first(c.getKeys()), c.getSubKey(),
                c.getExist(), c.getCallback()));
        } else {
            assert false : "Unhandled operation type: " + op.getClass();
        }

        return rv;
    }

    /**
     * Clone a (possibly multi-key) get operation into per-key operations.
     */
    protected abstract Collection<? extends Operation> cloneGet(
        KeyedOperation op);

}

+ * Note, this returns an exact reference to the bytes and the data + * must not be modified. + *

package net.spy.memcached.ops;

import net.spy.memcached.CASResponse;

/**
 * OperationStatus subclass for indicating CAS status.
 */
public class CASOperationStatus extends OperationStatus {

    // The CAS outcome carried alongside the plain success/message status.
    private final CASResponse casResponse;

    /**
     * @param success whether the operation succeeded
     * @param msg     raw status message from the server
     * @param cres    the CAS response classification for this status
     */
    public CASOperationStatus(boolean success, String msg, CASResponse cres) {
        super(success, msg);
        casResponse=cres;
    }

    /**
     * Get the CAS response indicated here.
     */
    public CASResponse getCASResponse() {
        return casResponse;
    }

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionBulkStore;

/**
 * Operation that represents collection object storage in bulk: one piped
 * request whose sub-operations are reported back individually.
 */
public interface CollectionBulkStoreOperation extends KeyedOperation {

    /**
     * Get the bulk store request this operation carries.
     */
    CollectionBulkStore getStore();

    /**
     * Operation callback for the bulk store request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called once per piped sub-operation.
         *
         * @param index  position of the sub-operation within the pipe
         * @param status that sub-operation's individual result
         */
        void gotStatus(Integer index, OperationStatus status);
    }

}
package net.spy.memcached.ops;

/**
 * Operation that represents a collection element count request.
 *
 * NOTE(review): the previous javadoc ("empty collection object create")
 * was copy-pasted from the create operation; corrected to match the
 * interface name.
 */
public interface CollectionCountOperation extends KeyedOperation {

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionDelete;

/**
 * Operation that represents collection object deletion.
 */
public interface CollectionDeleteOperation extends KeyedOperation {

    /**
     * Get the delete request this operation carries.
     */
    CollectionDelete getDelete();

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionGet;

/**
 * Operation that represents collection object retrieval.
 * (Typo "retreival" fixed.)
 */
public interface CollectionGetOperation extends KeyedOperation {

    /**
     * Get the retrieval request this operation carries.
     */
    CollectionGet getGet();

    /**
     * Operation callback for the collection get request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called once per element returned.
         *
         * @param key    key of the collection the element came from
         * @param subkey the element's sub-key (index or bkey) within the
         *               collection
         * @param flags  item flags stored with the value
         * @param data   the element's value bytes
         */
        void gotData(String key, long subkey, int flags, byte[] data);
    }

}
package net.spy.memcached.ops;

/**
 * Operation that represents a collection element value mutation
 * (increment/decrement).
 *
 * NOTE(review): the previous javadoc ("empty collection object create")
 * was copy-pasted from the create operation; corrected to match the
 * interface name.
 */
public interface CollectionMutateOperation extends KeyedOperation {

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionResponse;

/**
 * OperationStatus subclass for indicating collection status.
 */
public class CollectionOperationStatus extends OperationStatus {

    // Collection-protocol response code carried with this status.
    private final CollectionResponse collectionResponse;

    /**
     * @param success whether the operation succeeded
     * @param msg     raw status message from the server
     * @param res     pre-resolved collection response code
     */
    public CollectionOperationStatus(boolean success, String msg,
            CollectionResponse res) {
        super(success, msg);
        this.collectionResponse = res;
    }

    /**
     * Wrap a plain OperationStatus, deriving the collection response code
     * from its message via {@link CollectionResponse#resolve}.
     */
    public CollectionOperationStatus(OperationStatus status) {
        super(status.isSuccess(), status.getMessage());
        this.collectionResponse = CollectionResponse.resolve(status.getMessage());
    }

    /**
     * Get the collection response indicated here.
     */
    public CollectionResponse getResponse() {
        return collectionResponse;
    }

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.SetPipedExist;

/**
 * Operation that represents a piped membership-existence check on a set
 * collection.
 *
 * NOTE(review): the previous javadoc said "collection object storage",
 * copy-pasted from a store operation; this operation carries a
 * {@link SetPipedExist} request, not a store.
 */
public interface CollectionPipedExistOperation extends KeyedOperation {

    /**
     * Get the piped exist request this operation carries.
     */
    SetPipedExist getExist();

    /**
     * Operation callback for the piped exist request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called once per piped sub-operation.
         *
         * @param index  position of the sub-operation within the pipe
         * @param status that sub-operation's individual result
         */
        void gotStatus(Integer index, OperationStatus status);
    }

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionPipedUpdate;

/**
 * Operation that represents a piped update of collection elements.
 *
 * NOTE(review): the previous javadoc said "collection object storage",
 * copy-pasted from a store operation; this operation carries a
 * {@link CollectionPipedUpdate} request.
 */
public interface CollectionPipedUpdateOperation extends KeyedOperation {

    /**
     * Get the piped update request this operation carries.
     */
    CollectionPipedUpdate getUpdate();

    /**
     * Operation callback for the piped update request.
     */
    interface Callback extends OperationCallback {
        /**
         * Called once per piped sub-operation.
         *
         * @param index  position of the sub-operation within the pipe
         * @param status that sub-operation's individual result
         */
        void gotStatus(Integer index, OperationStatus status);
    }

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionUpdate;

/**
 * Operation that represents collection object update.
 */
public interface CollectionUpdateOperation extends KeyedOperation {

    /**
     * Sub-key (index, bkey, or field name) of the element to update.
     */
    String getSubKey();

    /**
     * Get the update request this operation carries.
     */
    CollectionUpdate getUpdate();

    /**
     * New value bytes for the element. Presumably an exact reference, as
     * with other operations in this package -- callers should not modify it.
     */
    byte[] getData();

}
package net.spy.memcached.ops;

import net.spy.memcached.collection.CollectionStore;

/**
 * Operation that represents collection object insert-or-update (upsert),
 * reusing the {@link CollectionStore} request type.
 */
public interface CollectionUpsertOperation extends KeyedOperation {

    /**
     * Sub-key (index, bkey, or field name) of the element to upsert.
     */
    String getSubKey();

    /**
     * Get the store request this operation carries.
     */
    CollectionStore getStore();

    /**
     * Value bytes for the element. Presumably an exact reference, as with
     * other operations in this package -- callers should not modify it.
     */
    byte[] getData();

}

+ * Note, this returns an exact reference to the bytes and the data + * must not be modified. + *

package net.spy.memcached.ops;

/**
 * Types of concatenation operations.
 *
 * NOTE(review): lowercase constant names presumably mirror the protocol
 * verbs "append"/"prepend"; keep as-is for API compatibility even though
 * they break the UPPER_SNAKE_CASE convention.
 */
public enum ConcatenationType {
    /**
     * Concatenate supplied data to the end of the existing data.
     */
    append,
    /**
     * Concatenate existing data onto the end of the supplied data.
     */
    prepend
}
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.ops; + +import net.spy.memcached.collection.CollectionGet; + +/** + * Operation that represents collection object retreival. + */ +public interface ExtendedBTreeGetOperation extends KeyedOperation { + + CollectionGet getGet(); + + interface Callback extends OperationCallback { + void gotData(String key, byte[] subkey, byte[] elementFlag, int flags, byte[] data); + } + +} diff --git a/src/main/java/net/spy/memcached/ops/FlushOperation.java b/src/main/java/net/spy/memcached/ops/FlushOperation.java new file mode 100644 index 000000000..45238c359 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/FlushOperation.java @@ -0,0 +1,9 @@ +package net.spy.memcached.ops; + + +/** + * Flush operation marker. + */ +public interface FlushOperation extends Operation { + // nothing +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/GetAttrOperation.java b/src/main/java/net/spy/memcached/ops/GetAttrOperation.java new file mode 100644 index 000000000..7022e808d --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/GetAttrOperation.java @@ -0,0 +1,37 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.ops; + +/** + * GetAttr operation. 
+ */ +public interface GetAttrOperation extends KeyedOperation { + + /** + * Operation callback for the get request. + */ + interface Callback extends OperationCallback { + /** + * Callback for each result from a get. + * + * @param key the key that was retrieved + * @param attr an attribute(name=value) returned by the memcached server + */ + void gotAttribute(String key, String attr); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/GetOperation.java b/src/main/java/net/spy/memcached/ops/GetOperation.java new file mode 100644 index 000000000..4f3178e51 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/GetOperation.java @@ -0,0 +1,23 @@ +package net.spy.memcached.ops; + + +/** + * Get operation. + */ +public interface GetOperation extends KeyedOperation { + + /** + * Operation callback for the get request. + */ + interface Callback extends OperationCallback { + /** + * Callback for each result from a get. + * + * @param key the key that was retrieved + * @param flags the flags for this value + * @param data the data stored under this key + */ + void gotData(String key, int flags, byte[] data); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/GetsOperation.java b/src/main/java/net/spy/memcached/ops/GetsOperation.java new file mode 100644 index 000000000..d66d7561d --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/GetsOperation.java @@ -0,0 +1,24 @@ +package net.spy.memcached.ops; + + +/** + * Gets operation (get with CAS identifier support). + */ +public interface GetsOperation extends KeyedOperation { + + /** + * Operation callback for the Gets request. + */ + interface Callback extends OperationCallback { + /** + * Callback for each result from a gets. 
+ * + * @param key the key that was retrieved + * @param flags the flags for this value + * @param cas the CAS value for this record + * @param data the data stored under this key + */ + void gotData(String key, int flags, long cas, byte[] data); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/KeyedOperation.java b/src/main/java/net/spy/memcached/ops/KeyedOperation.java new file mode 100644 index 000000000..699fd4ffe --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/KeyedOperation.java @@ -0,0 +1,15 @@ +package net.spy.memcached.ops; + +import java.util.Collection; + +/** + * Operations that contain keys. + */ +public interface KeyedOperation extends Operation { + + /** + * Get the keys requested in this GetOperation. + */ + Collection getKeys(); + +} diff --git a/src/main/java/net/spy/memcached/ops/LinkedOperationQueueFactory.java b/src/main/java/net/spy/memcached/ops/LinkedOperationQueueFactory.java new file mode 100644 index 000000000..f394c9339 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/LinkedOperationQueueFactory.java @@ -0,0 +1,19 @@ +package net.spy.memcached.ops; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +/** + * OperationQueueFactory that creates LinkedBlockingQueue (unbounded) operation + * queues. + */ +public class LinkedOperationQueueFactory implements OperationQueueFactory { + + /* (non-Javadoc) + * @see net.spy.memcached.ops.OperationQueueFactory#create() + */ + public BlockingQueue create() { + return new LinkedBlockingQueue(); + } + +} diff --git a/src/main/java/net/spy/memcached/ops/MultiGetOperationCallback.java b/src/main/java/net/spy/memcached/ops/MultiGetOperationCallback.java new file mode 100644 index 000000000..1b0f392f1 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/MultiGetOperationCallback.java @@ -0,0 +1,17 @@ +package net.spy.memcached.ops; + +/** + * MultiOperationCallback for get operations. 
+ */ +public class MultiGetOperationCallback extends MultiOperationCallback + implements GetOperation.Callback { + + public MultiGetOperationCallback(OperationCallback original, int todo) { + super(original, todo); + } + + public void gotData(String key, int flags, byte[] data) { + ((GetOperation.Callback)originalCallback).gotData(key, flags, data); + } + +} diff --git a/src/main/java/net/spy/memcached/ops/MultiGetsOperationCallback.java b/src/main/java/net/spy/memcached/ops/MultiGetsOperationCallback.java new file mode 100644 index 000000000..517dc2f79 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/MultiGetsOperationCallback.java @@ -0,0 +1,18 @@ +package net.spy.memcached.ops; + +/** + * MultiOperationCallback for get operations. + */ +public class MultiGetsOperationCallback extends MultiOperationCallback + implements GetsOperation.Callback { + + public MultiGetsOperationCallback(OperationCallback original, int todo) { + super(original, todo); + } + + public void gotData(String key, int flags, long cas, byte[] data) { + ((GetsOperation.Callback)originalCallback).gotData( + key, flags, cas, data); + } + +} diff --git a/src/main/java/net/spy/memcached/ops/MultiOperationCallback.java b/src/main/java/net/spy/memcached/ops/MultiOperationCallback.java new file mode 100644 index 000000000..2cc43b20d --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/MultiOperationCallback.java @@ -0,0 +1,41 @@ +package net.spy.memcached.ops; + +/** + * An operation callback that will capture receivedStatus and complete + * invocations and dispatch to a single callback. + * + *

+ * This is useful for the cases where a single request gets split into + * multiple requests and the callback needs to not know the difference. + *

+ */ +public abstract class MultiOperationCallback implements OperationCallback { + + private OperationStatus mostRecentStatus = null; + private int remaining=0; + protected final OperationCallback originalCallback; + + /** + * Get a MultiOperationCallback over the given callback for the specified + * number of replicates. + * + * @param original the original callback + * @param todo how many complete() calls we expect before dispatching. + */ + public MultiOperationCallback(OperationCallback original, int todo) { + originalCallback = original; + remaining = todo; + } + + public void complete() { + if(--remaining == 0) { + originalCallback.receivedStatus(mostRecentStatus); + originalCallback.complete(); + } + } + + public void receivedStatus(OperationStatus status) { + mostRecentStatus = status; + } + +} diff --git a/src/main/java/net/spy/memcached/ops/Mutator.java b/src/main/java/net/spy/memcached/ops/Mutator.java new file mode 100644 index 000000000..d61b745a7 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/Mutator.java @@ -0,0 +1,15 @@ +package net.spy.memcached.ops; + +/** + * Type of mutation to perform. + */ +public enum Mutator { + /** + * Increment a value on the memcached server. + */ + incr, + /** + * Decrement a value on the memcached server. + */ + decr +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/MutatorOperation.java b/src/main/java/net/spy/memcached/ops/MutatorOperation.java new file mode 100644 index 000000000..b4217e64c --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/MutatorOperation.java @@ -0,0 +1,27 @@ +package net.spy.memcached.ops; + +/** + * incr and decr operations. + */ +public interface MutatorOperation extends KeyedOperation { + + /** + * Get the mutator type used for this operation. + */ + Mutator getType(); + + /** + * Get the amount we're mutating by. + */ + int getBy(); + + /** + * Get the default value (for when there's no value to mutate). 
+ */ + long getDefault(); + + /** + * Get the expiration to set in case of a new entry. + */ + int getExpiration(); +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/NoopOperation.java b/src/main/java/net/spy/memcached/ops/NoopOperation.java new file mode 100644 index 000000000..6ccb274f7 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/NoopOperation.java @@ -0,0 +1,8 @@ +package net.spy.memcached.ops; + +/** + * The NOOP Operation. + */ +public interface NoopOperation extends Operation { + // Nothing +} diff --git a/src/main/java/net/spy/memcached/ops/Operation.java b/src/main/java/net/spy/memcached/ops/Operation.java new file mode 100644 index 000000000..65f547bb6 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/Operation.java @@ -0,0 +1,83 @@ +package net.spy.memcached.ops; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import net.spy.memcached.MemcachedNode; + + +/** + * Base interface for all operations. + */ +public interface Operation { + + /** + * Has this operation been cancelled? + */ + boolean isCancelled(); + + /** + * True if an error occurred while processing this operation. + */ + boolean hasErrored(); + + /** + * Get the exception that occurred (or null if no exception occurred). + */ + OperationException getException(); + + /** + * Get the callback for this get operation. + */ + OperationCallback getCallback(); + + /** + * Cancel this operation. + */ + void cancel(); + + /** + * Get the current state of this operation. + */ + OperationState getState(); + + /** + * Get the write buffer for this operation. + */ + ByteBuffer getBuffer(); + + /** + * Invoked after having written all of the bytes from the supplied output + * buffer. + */ + void writeComplete(); + + /** + * Initialize this operation. This is used to prepare output byte buffers + * and stuff. + */ + void initialize(); + + /** + * Read data from the given byte buffer and dispatch to the appropriate + * read mechanism. 
+ */ + void readFromBuffer(ByteBuffer data) throws IOException; + + /** + * Handle a raw data read. + */ + void handleRead(ByteBuffer data); + + /** + * Get the node that should've been handling this operation. + */ + MemcachedNode getHandlingNode(); + + /** + * Set a reference to the node that will be/is handling this operation. + * + * @param to a memcached node + */ + void setHandlingNode(MemcachedNode to); +} diff --git a/src/main/java/net/spy/memcached/ops/OperationCallback.java b/src/main/java/net/spy/memcached/ops/OperationCallback.java new file mode 100644 index 000000000..2093a453c --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationCallback.java @@ -0,0 +1,21 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.ops; + +/** + * Callback that's invoked with the response of an operation. + */ +public interface OperationCallback { + + /** + * Method invoked with the status when the operation is complete. + * + * @param status the result of the operation + */ + void receivedStatus(OperationStatus status); + + /** + * Called whenever an operation completes. + */ + void complete(); +} diff --git a/src/main/java/net/spy/memcached/ops/OperationErrorType.java b/src/main/java/net/spy/memcached/ops/OperationErrorType.java new file mode 100644 index 000000000..4607b63ea --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationErrorType.java @@ -0,0 +1,20 @@ +package net.spy.memcached.ops; + +/** + * Error classification. + */ +public enum OperationErrorType { + /** + * General error. + */ + GENERAL, + /** + * Error that occurred because the client did something stupid. + */ + CLIENT, + /** + * Error that occurred because the server did something stupid. 
+ */ + SERVER; + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/OperationException.java b/src/main/java/net/spy/memcached/ops/OperationException.java new file mode 100644 index 000000000..ab267757a --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationException.java @@ -0,0 +1,49 @@ +package net.spy.memcached.ops; + +import java.io.IOException; + + +/** + * Exceptions thrown when protocol errors occur. + */ +public final class OperationException extends IOException { + + private final OperationErrorType type; + + /** + * General exception (no message). + */ + public OperationException() { + super(); + type=OperationErrorType.GENERAL; + } + + /** + * Exception with a message. + * + * @param eType the type of error that occurred + * @param msg the error message + */ + public OperationException(OperationErrorType eType, String msg) { + super(msg); + type=eType; + } + + /** + * Get the type of error. + */ + public OperationErrorType getType() { + return type; + } + + @Override + public String toString() { + String rv=null; + if(type == OperationErrorType.GENERAL) { + rv="OperationException: " + type; + } else { + rv="OperationException: " + type + ": " + getMessage(); + } + return rv; + } +} diff --git a/src/main/java/net/spy/memcached/ops/OperationQueueFactory.java b/src/main/java/net/spy/memcached/ops/OperationQueueFactory.java new file mode 100644 index 000000000..d0e052a92 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationQueueFactory.java @@ -0,0 +1,16 @@ +package net.spy.memcached.ops; + +import java.util.concurrent.BlockingQueue; + + +/** + * Factory used for creating operation queues. + */ +public interface OperationQueueFactory { + + /** + * Create an instance of a queue. 
+ */ + BlockingQueue create(); + +} diff --git a/src/main/java/net/spy/memcached/ops/OperationState.java b/src/main/java/net/spy/memcached/ops/OperationState.java new file mode 100644 index 000000000..58e01c2d0 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationState.java @@ -0,0 +1,39 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.ops; + +/** + * State of this operation. + */ +public enum OperationState { + /** + * State indicating this operation is writing data to the server. + */ + WRITING, + /** + * State indicating this operation is reading data from the server. + */ + READING, + /** + * State indicating this operation is complete. + */ + COMPLETE, + /** + * State indicating this operation timed out without completing. + */ + TIMEDOUT +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/OperationStatus.java b/src/main/java/net/spy/memcached/ops/OperationStatus.java new file mode 100644 index 000000000..465cd1b1c --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/OperationStatus.java @@ -0,0 +1,35 @@ +package net.spy.memcached.ops; + +/** + * Status indicator. 
+ */ +public class OperationStatus { + + private final boolean isSuccess; + private final String message; + + public OperationStatus(boolean success, String msg) { + super(); + isSuccess = success; + message = msg; + } + + /** + * Does this status indicate success? + */ + public boolean isSuccess() { + return isSuccess; + } + + /** + * Get the message included as part of this status. + */ + public String getMessage() { + return message; + } + + @Override + public String toString() { + return "{OperationStatus success=" + isSuccess + ": " + message + "}"; + } +} diff --git a/src/main/java/net/spy/memcached/ops/SASLAuthOperation.java b/src/main/java/net/spy/memcached/ops/SASLAuthOperation.java new file mode 100644 index 000000000..e547c00cb --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/SASLAuthOperation.java @@ -0,0 +1,8 @@ +package net.spy.memcached.ops; + +/** + * Operation for beginning a SASL auth cycle. + */ +public interface SASLAuthOperation extends Operation { + // nothing +} diff --git a/src/main/java/net/spy/memcached/ops/SASLMechsOperation.java b/src/main/java/net/spy/memcached/ops/SASLMechsOperation.java new file mode 100644 index 000000000..08c625886 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/SASLMechsOperation.java @@ -0,0 +1,8 @@ +package net.spy.memcached.ops; + +/** + * Operation for listing supported SASL mechanisms. + */ +public interface SASLMechsOperation extends Operation { + // Nothing. +} diff --git a/src/main/java/net/spy/memcached/ops/SASLStepOperation.java b/src/main/java/net/spy/memcached/ops/SASLStepOperation.java new file mode 100644 index 000000000..f7df9855b --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/SASLStepOperation.java @@ -0,0 +1,8 @@ +package net.spy.memcached.ops; + +/** + * Operation for proceeding in a SASL auth negotiation. 
+ */ +public interface SASLStepOperation extends Operation { + // nothing +} diff --git a/src/main/java/net/spy/memcached/ops/SetAttrOperation.java b/src/main/java/net/spy/memcached/ops/SetAttrOperation.java new file mode 100644 index 000000000..c72ddf9cd --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/SetAttrOperation.java @@ -0,0 +1,28 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.ops; + +import net.spy.memcached.collection.Attributes; + +/** + * SetAttr operation. + */ +public interface SetAttrOperation extends KeyedOperation { + + Attributes getAttributes(); + +} diff --git a/src/main/java/net/spy/memcached/ops/StatsOperation.java b/src/main/java/net/spy/memcached/ops/StatsOperation.java new file mode 100644 index 000000000..29ff2926f --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/StatsOperation.java @@ -0,0 +1,22 @@ +package net.spy.memcached.ops; + + +/** + * Stats fetching operation. + */ +public interface StatsOperation extends Operation { + + /** + * Callback for stats operation. + */ + interface Callback extends OperationCallback { + /** + * Invoked once for every stat returned from the server. + * + * @param name the name of the stat + * @param val the stat value. 
+ */ + void gotStat(String name, String val); + } + // nothing +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/StoreOperation.java b/src/main/java/net/spy/memcached/ops/StoreOperation.java new file mode 100644 index 000000000..b78c4b6ce --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/StoreOperation.java @@ -0,0 +1,32 @@ +package net.spy.memcached.ops; + +/** + * Operation that represents object storage. + */ +public interface StoreOperation extends KeyedOperation { + + /** + * Get the store type used by this operation. + */ + StoreType getStoreType(); + + /** + * Get the flags to be set. + */ + int getFlags(); + + /** + * Get the expiration value to be set. + */ + int getExpiration(); + + /** + * Get the bytes to be set during this operation. + * + *

+ * Note, this returns an exact reference to the bytes and the data + * must not be modified. + *

+ */ + byte[] getData(); +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/StoreType.java b/src/main/java/net/spy/memcached/ops/StoreType.java new file mode 100644 index 000000000..c15f69e37 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/StoreType.java @@ -0,0 +1,21 @@ +package net.spy.memcached.ops; + +/** + * The type of storage operation to perform. + */ +public enum StoreType { + /** + * Unconditionally store a value in the cache. + */ + set, + /** + * Store a value in the cache iff there is not already something stored + * for the given key. + */ + add, + /** + * Store a value in the cache iff there is already something stored for + * the given key. + */ + replace +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/VersionOperation.java b/src/main/java/net/spy/memcached/ops/VersionOperation.java new file mode 100644 index 000000000..54e64892a --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/VersionOperation.java @@ -0,0 +1,8 @@ +package net.spy.memcached.ops; + +/** + * Version operation. + */ +public interface VersionOperation extends Operation { + // nothing +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/ops/package.html b/src/main/java/net/spy/memcached/ops/package.html new file mode 100644 index 000000000..6d7c57324 --- /dev/null +++ b/src/main/java/net/spy/memcached/ops/package.html @@ -0,0 +1,14 @@ + + + + + + Fundamental protocol operation interfaces + + + +

Fundamental protocol operation interfaces

+ + + diff --git a/src/main/java/net/spy/memcached/overview.html b/src/main/java/net/spy/memcached/overview.html new file mode 100644 index 000000000..24afabe81 --- /dev/null +++ b/src/main/java/net/spy/memcached/overview.html @@ -0,0 +1,26 @@ + + + + + + Welcome to spymemcached + + + + +

Welcome to spymemcached.

+ +

+ Get a {@link net.spy.memcached.MemcachedClient} object and start setting and + getting stuff in memcached. +

+ +

+ You may also find the + online examples + helpful. +

+ + + diff --git a/src/main/java/net/spy/memcached/package.html b/src/main/java/net/spy/memcached/package.html new file mode 100644 index 000000000..d660ab502 --- /dev/null +++ b/src/main/java/net/spy/memcached/package.html @@ -0,0 +1,32 @@ + + + + + + Memcached client and transformation utils + + + +

Memcached client and transformation utils

+ +

+ Usage should be pretty straightforward. Get a {@link + net.spy.memcached.MemcachedClient} object and start setting and + getting stuff in memcached. +

+

+ All operations are asynchronous internally, but most at least provide + synchronous convenience interfaces. Some only provide synchronous + interfaces (getVersion, getStats) and some only provide asynchronous + interfaces (delete, flush). That'll probably all get cleared up if it + bothers anyone. +

+

+ You may also find the + online examples + helpful. +

+ + + diff --git a/src/main/java/net/spy/memcached/plugin/FrontCacheGetFuture.java b/src/main/java/net/spy/memcached/plugin/FrontCacheGetFuture.java new file mode 100644 index 000000000..1d2ad9234 --- /dev/null +++ b/src/main/java/net/spy/memcached/plugin/FrontCacheGetFuture.java @@ -0,0 +1,72 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.plugin; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.sf.ehcache.Element; + +/** + * Future returned for GET operations. + * + * Not intended for general use. 
+ * + * @param Type of object returned from the get + */ +public class FrontCacheGetFuture implements Future { + + Element element; + + public FrontCacheGetFuture(Element element) { + this.element = element; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public T get() throws InterruptedException, ExecutionException { + return getValue(); + } + + @SuppressWarnings("unchecked") + private T getValue() { + return (T) this.element.getObjectValue(); + } + + @Override + public T get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, TimeoutException { + return getValue(); + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return false; + } + +} diff --git a/src/main/java/net/spy/memcached/plugin/FrontCacheMemcachedClient.java b/src/main/java/net/spy/memcached/plugin/FrontCacheMemcachedClient.java new file mode 100644 index 000000000..f0b551505 --- /dev/null +++ b/src/main/java/net/spy/memcached/plugin/FrontCacheMemcachedClient.java @@ -0,0 +1,102 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.plugin; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.List; +import java.util.concurrent.Future; + +import net.sf.ehcache.Element; +import net.spy.memcached.ConnectionFactory; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Front cache for some Arcus commands. + * For now, it supports get commands. The front cache stores the value from a get operation. + * A subsequent get operation first checks the cache. If the key is found in the cache, + * it is returned from the front cache. If not, the get command goes to the server as usual. + * + * Cache parameters (name, size, expiration time) are from ConnectionFactory. + * @see net.spy.memcached.ConnectionFactoryBuilder + * @see net.spy.memcached.plugin.LocalCacheManager + */ +public class FrontCacheMemcachedClient extends MemcachedClient { + + /** + * Create the memcached client and the front cache. + * + * @param cf the connection factory to configure connections for this client + * @param addrs the socket addresses for the memcached servers + * @throws IOException if connections cannot be established + */ + public FrontCacheMemcachedClient(ConnectionFactory cf, + List addrs) throws IOException { + super(cf, addrs); + + if (cf.getMaxFrontCacheElements() > 0) { + String cacheName = cf.getFrontCacheName(); + int maxElements = cf.getMaxFrontCacheElements(); + int timeToLiveSeconds = cf.getFrontCacheExpireTime(); + // TODO add an additional option + // int timeToIdleSeconds = timeToLiveSeconds; + + localCacheManager = new LocalCacheManager(cacheName, maxElements, + timeToLiveSeconds); + } + } + + /** + * Get the value of the key. + * Check the local cache first. If the key is not found, send the command to the server. 
+ * + * @param key the key to fetch + * @param tc the transcoder to serialize and unserialize value + * @return a future that will hold the value of the key + */ + @Override + public Future asyncGet(final String key, final Transcoder tc) { + Element frontElement = null; + + if (localCacheManager != null) { + frontElement = localCacheManager.getElement(key); + } + + if (frontElement == null) { + return super.asyncGet(key, tc); + } else { + return new FrontCacheGetFuture(frontElement); + } + } + + /** + * Delete the key. + * Delete the key from the local cache before sending the command to the server. + * + * @param key the key to delete + * @return a future that will hold success/error status of the operation + */ + @Override + public Future delete(String key) { + if (localCacheManager != null) { + localCacheManager.delete(key); + } + return super.delete(key); + } + +} diff --git a/src/main/java/net/spy/memcached/plugin/LocalCacheManager.java b/src/main/java/net/spy/memcached/plugin/LocalCacheManager.java new file mode 100644 index 000000000..68bd1afce --- /dev/null +++ b/src/main/java/net/spy/memcached/plugin/LocalCacheManager.java @@ -0,0 +1,179 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.plugin; + +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import net.sf.ehcache.Cache; +import net.sf.ehcache.CacheManager; +import net.sf.ehcache.Element; +import net.sf.ehcache.store.MemoryStoreEvictionPolicy; +import net.spy.memcached.compat.log.Logger; +import net.spy.memcached.compat.log.LoggerFactory; +import net.spy.memcached.transcoders.Transcoder; + +/** + * Local cache storage based on ehcache. + */ +public class LocalCacheManager { + + private Logger logger = LoggerFactory.getLogger(getClass()); + + protected Cache cache; + protected String name; + + public LocalCacheManager() { + this("DEFAULT_ARCUS_LOCAL_CACHE"); + } + + public LocalCacheManager(String name) { + this.name = name; + // create a undecorated Cache object. + this.cache = CacheManager.getInstance().getCache(name); + } + + public LocalCacheManager(String name, int max, int exptime) { + this.cache = CacheManager.getInstance().getCache(name); + if (cache == null) { + this.cache = new Cache(name, max, MemoryStoreEvictionPolicy.LRU, + false, "", false, exptime, exptime, false, 60, null); + CacheManager.getInstance().addCache(cache); + + if (logger.isInfoEnabled()) { + logger.info("Arcus k/v local cache is enabled : %s", cache.toString()); + } + } + } + + @SuppressWarnings("unchecked") + public T get(String key, Transcoder tc) { + if (cache == null) { + return null; + } + + try { + Element element = cache.get(key); + return element == null ? 
null : (T) element.getObjectValue(); + } catch (Exception e) { + logger.info("failed to get from the local cache : %s", e.getMessage()); + return null; + } + } + + public Future asyncGet(final String key, final Transcoder tc) { + Task task = new Task(new Callable() { + public T call() throws Exception { + return get(key, tc); + } + }); + return task; + } + + public Future asyncPreFetch(final String key, final Transcoder tc) { + Task task = new Task(new Callable() { + // pre-fetch the locally cached data. + T v = get(key, tc); + + public T call() throws Exception { + return v; + } + }); + return task; + } + + public Element getElement(String key) { + return cache.get(key); + } + + public boolean put(String k, T v) { + if (v == null) { + return false; + } + + try { + cache.put(new Element(k, v)); + return true; + } catch (Exception e) { + if (logger.isInfoEnabled()) { + logger.info("failed to put to the local cache : %s", e.getMessage()); + } + return false; + } + } + + public boolean put(String k, Future future, long timeout) { + if (future == null) { + return false; + } + + try { + T v = future.get(timeout, TimeUnit.MILLISECONDS); + return put(k, v); + } catch (Exception e) { + logger.info("failed to put to the local cache : %s", e.getMessage()); + return false; + } + } + + public void delete(String k) { + try { + cache.remove(k); + } catch (Exception e) { + logger.info("failed to remove the locally cached item : %s", e.getMessage()); + } + } + + public static class Task extends FutureTask { + private final AtomicBoolean isRunning = new AtomicBoolean(false); + + public Task(Callable callable) { + super(callable); + } + + @Override + public T get() throws InterruptedException, ExecutionException { + this.run(); + return super.get(); + } + + @Override + public T get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, TimeoutException { + this.run(); + return super.get(timeout, unit); + } + + @Override + public void run() { + if 
(this.isRunning.compareAndSet(false, true)) { + super.run(); + } + } + } + + @Override + public String toString() { + return cache.toString(); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/BaseOperationImpl.java b/src/main/java/net/spy/memcached/protocol/BaseOperationImpl.java new file mode 100644 index 000000000..ec262b29a --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/BaseOperationImpl.java @@ -0,0 +1,165 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import net.spy.memcached.MemcachedNode; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.ops.CancelledOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationErrorType; +import net.spy.memcached.ops.OperationException; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Base class for protocol-specific operation implementations. + */ +public abstract class BaseOperationImpl extends SpyObject { + + /** + * Status object for canceled operations. 
	 */
	public static final OperationStatus CANCELLED =
		new CancelledOperationStatus();
	// Current position in the operation state machine; starts in WRITING.
	private OperationState state = OperationState.WRITING;
	// Command bytes pending write; discarded once the op leaves WRITING.
	private ByteBuffer cmd = null;
	private boolean cancelled = false;
	private OperationException exception = null;
	protected OperationCallback callback = null;
	// Node currently handling this op; volatile because it is set from the
	// connection side and read elsewhere.
	private volatile MemcachedNode handlingNode = null;

	public BaseOperationImpl() {
		super();
	}

	/**
	 * Get the operation callback associated with this operation.
	 */
	public final OperationCallback getCallback() {
		return callback;
	}

	/**
	 * Set the callback for this instance.
	 */
	protected void setCallback(OperationCallback to) {
		callback=to;
	}

	public final boolean isCancelled() {
		return cancelled;
	}

	public final boolean hasErrored() {
		return exception != null;
	}

	public final OperationException getException() {
		return exception;
	}

	/**
	 * Cancel this operation, notify the subclass, and complete the callback.
	 * NOTE(review): this does not transition {@code state}; callers appear
	 * expected to check isCancelled() — confirm.
	 */
	public final void cancel() {
		cancelled=true;
		wasCancelled();
		callback.complete();
	}

	/**
	 * This is called on each subclass whenever an operation was cancelled.
	 */
	protected void wasCancelled() {
		getLogger().debug("was cancelled.");
	}

	public final OperationState getState() {
		return state;
	}

	public final ByteBuffer getBuffer() {
		return cmd;
	}

	/**
	 * Set the write buffer for this operation.  The buffer is marked so it
	 * can later be reset to the beginning (see setupResend in node impls).
	 */
	protected final void setBuffer(ByteBuffer to) {
		assert to != null : "Trying to set buffer to null";
		cmd=to;
		cmd.mark();
	}

	/**
	 * Transition the state of this operation to the given state.
	 * Drops the write buffer once past WRITING, and completes the callback
	 * on COMPLETE or TIMEDOUT.
	 */
	protected final void transitionState(OperationState newState) {
		getLogger().debug("Transitioned state from %s to %s", state, newState);
		state=newState;
		// Discard our buffer when we no longer need it.
		if(state != OperationState.WRITING) {
			cmd=null;
		}
		if(state == OperationState.COMPLETE) {
			callback.complete();
		}
		if(state == OperationState.TIMEDOUT) {
			// cmd is already null here (cleared above); kept for clarity.
			cmd = null;
			callback.complete();
		}
	}

	// Called by the node once the whole command buffer has been written.
	public final void writeComplete() {
		transitionState(OperationState.READING);
	}

	public abstract void initialize();

	public abstract void readFromBuffer(ByteBuffer data) throws IOException;

	/**
	 * Record a protocol error line, mark the op COMPLETE (which completes
	 * the callback), then throw the recorded exception.
	 * NOTE(review): the GENERAL case drops the error line from the
	 * exception; SERVER and CLIENT preserve it.
	 */
	protected void handleError(OperationErrorType eType, String line)
		throws IOException {
		getLogger().error("Error: %s", line);
		switch(eType) {
			case GENERAL:
				exception=new OperationException();
				break;
			case SERVER:
				exception=new OperationException(eType, line);
				break;
			case CLIENT:
				exception=new OperationException(eType, line);
				break;
			default: assert false;
		}
		transitionState(OperationState.COMPLETE);
		throw exception;
	}

	/**
	 * Default read handler; subclasses that receive data must override.
	 */
	public void handleRead(ByteBuffer data) {
		assert false;
	}

	public MemcachedNode getHandlingNode() {
		return handlingNode;
	}

	public void setHandlingNode(MemcachedNode to) {
		handlingNode = to;
	}
}
diff --git a/src/main/java/net/spy/memcached/protocol/GetCallbackWrapper.java b/src/main/java/net/spy/memcached/protocol/GetCallbackWrapper.java
new file mode 100644
index 000000000..bc14842ce
--- /dev/null
+++ b/src/main/java/net/spy/memcached/protocol/GetCallbackWrapper.java
@@ -0,0 +1,48 @@
/**
 *
 */
package net.spy.memcached.protocol;

import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.OperationStatus;

/**
 * Wrapper callback for use in optimized gets.
+ */ +public class GetCallbackWrapper implements GetOperation.Callback { + + private static final OperationStatus END= + new OperationStatus(true, "END"); + + private boolean completed=false; + private int remainingKeys=0; + private GetOperation.Callback cb=null; + + public GetCallbackWrapper(int k, GetOperation.Callback c) { + super(); + remainingKeys=k; + cb=c; + } + + public void gotData(String key, int flags, byte[] data) { + assert !completed : "Got data for a completed wrapped op"; + cb.gotData(key, flags, data); + if(--remainingKeys == 0) { + // Fake a status line + receivedStatus(END); + } + } + + public void receivedStatus(OperationStatus status) { + if(!completed) { + cb.receivedStatus(status); + } + } + + public void complete() { + assert !completed; + cb.complete(); + completed=true; + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/ProxyCallback.java b/src/main/java/net/spy/memcached/protocol/ProxyCallback.java new file mode 100644 index 000000000..269c07f85 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ProxyCallback.java @@ -0,0 +1,65 @@ +/** + * + */ +package net.spy.memcached.protocol; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.OperationStatus; + +/** + * Proxy callback used for dispatching callbacks over optimized gets. 
+ */ +public class ProxyCallback implements GetOperation.Callback { + + private final Map> callbacks= + new HashMap>(); + private final Collection allCallbacks= + new ArrayList(); + + public void addCallbacks(GetOperation o) { + GetOperation.Callback c=new GetCallbackWrapper(o.getKeys().size(), + (GetOperation.Callback)o.getCallback()); + allCallbacks.add(c); + for(String s : o.getKeys()) { + Collection cbs=callbacks.get(s); + if(cbs == null) { + cbs=new ArrayList(); + callbacks.put(s, cbs); + } + cbs.add(c); + } + } + + public void gotData(String key, int flags, byte[] data) { + Collection cbs=callbacks.get(key); + assert cbs != null : "No callbacks for key " + key; + for(GetOperation.Callback c : cbs) { + c.gotData(key, flags, data); + } + } + + public void receivedStatus(OperationStatus status) { + for(GetOperation.Callback c : allCallbacks) { + c.receivedStatus(status); + } + } + + public void complete() { + for(GetOperation.Callback c : allCallbacks) { + c.complete(); + } + } + + public int numKeys() { + return callbacks.size(); + } + + public int numCallbacks() { + return allCallbacks.size(); + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/TCPMemcachedNodeImpl.java b/src/main/java/net/spy/memcached/protocol/TCPMemcachedNodeImpl.java new file mode 100644 index 000000000..906026fda --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/TCPMemcachedNodeImpl.java @@ -0,0 +1,565 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol; + +import java.io.IOException; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collection; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import net.spy.memcached.CacheMonitor; +import net.spy.memcached.MemcachedNode; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; + +/** + * Represents a node with the memcached cluster, along with buffering and + * operation queues. + */ +public abstract class TCPMemcachedNodeImpl extends SpyObject + implements MemcachedNode { + + private final SocketAddress socketAddress; + private final ByteBuffer rbuf; + private final ByteBuffer wbuf; + protected final BlockingQueue writeQ; + private final BlockingQueue readQ; + private final BlockingQueue inputQueue; + private final long opQueueMaxBlockTime; + // This has been declared volatile so it can be used as an availability + // indicator. 
+ private volatile int reconnectAttempt=1; + private SocketChannel channel; + private int toWrite=0; + protected Operation optimizedOp=null; + private volatile SelectionKey sk=null; + private boolean shouldAuth=false; + private CountDownLatch authLatch; + private ArrayList reconnectBlocked; + + // operation Future.get timeout counter + private final AtomicInteger continuousTimeout = new AtomicInteger(0); + + // fake node + private boolean isFake = false; + + public boolean isFake() { + return isFake; + } + + public TCPMemcachedNodeImpl(SocketAddress sa, SocketChannel c, + int bufSize, BlockingQueue rq, + BlockingQueue wq, BlockingQueue iq, + long opQueueMaxBlockTime, boolean waitForAuth) { + super(); + assert sa != null : "No SocketAddress"; + assert c != null : "No SocketChannel"; + assert bufSize > 0 : "Invalid buffer size: " + bufSize; + assert rq != null : "No operation read queue"; + assert wq != null : "No operation write queue"; + assert iq != null : "No input queue"; + socketAddress=sa; + setChannel(c); + rbuf=ByteBuffer.allocate(bufSize); + wbuf=ByteBuffer.allocate(bufSize); + getWbuf().clear(); + readQ=rq; + writeQ=wq; + inputQueue=iq; + this.opQueueMaxBlockTime = opQueueMaxBlockTime; + shouldAuth = waitForAuth; + setupForAuth(); + + // is this a fake node? 
+ isFake = ("/" + CacheMonitor.FAKE_SERVER_NODE).equals(sa.toString()); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#copyInputQueue() + */ + public final void copyInputQueue() { + Collection tmp=new ArrayList(); + + // don't drain more than we have space to place + inputQueue.drainTo(tmp, writeQ.remainingCapacity()); + + writeQ.addAll(tmp); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#destroyInputQueue() + */ + public Collection destroyInputQueue() { + Collection rv=new ArrayList(); + inputQueue.drainTo(rv); + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#setupResend() + */ + public final void setupResend() { + // First, reset the current write op, or cancel it if we should + // be authenticating + Operation op=getCurrentWriteOp(); + if(shouldAuth && op != null) { + op.cancel(); + } else if(op != null) { + ByteBuffer buf=op.getBuffer(); + if(buf != null) { + buf.reset(); + } else { + getLogger().info("No buffer for current write op, removing"); + removeCurrentWriteOp(); + } + } + // Now cancel all the pending read operations. Might be better to + // to requeue them. + while(hasReadOp()) { + op=removeCurrentReadOp(); + if (op != getCurrentWriteOp()) { + getLogger().warn("Discarding partially completed op: %s", op); + op.cancel(); + } + } + + while(shouldAuth && hasWriteOp()) { + op=removeCurrentWriteOp(); + getLogger().warn("Discarding partially completed op: %s", op); + op.cancel(); + } + + + getWbuf().clear(); + getRbuf().clear(); + toWrite=0; + } + + // Prepare the pending operations. Return true if there are any pending + // ops + private boolean preparePending() { + // Copy the input queue into the write queue. 
+ copyInputQueue(); + + // Now check the ops + Operation nextOp=getCurrentWriteOp(); + while(nextOp != null && nextOp.isCancelled()) { + getLogger().info("Removing cancelled operation: %s", nextOp); + removeCurrentWriteOp(); + nextOp=getCurrentWriteOp(); + } + return nextOp != null; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#fillWriteBuffer(boolean) + */ + public final void fillWriteBuffer(boolean shouldOptimize) { + if(toWrite == 0 && readQ.remainingCapacity() > 0) { + getWbuf().clear(); + Operation o=getCurrentWriteOp(); + while(o != null && toWrite < getWbuf().capacity()) { + assert o.getState() == OperationState.WRITING; + // This isn't the most optimal way to do this, but it hints + // at a larger design problem that may need to be taken care + // if in the bowels of the client. + // In practice, readQ should be small, however. + if(!readQ.contains(o)) { + readQ.add(o); + } + + ByteBuffer obuf=o.getBuffer(); + assert obuf != null : "Didn't get a write buffer from " + o; + int bytesToCopy=Math.min(getWbuf().remaining(), + obuf.remaining()); + byte b[]=new byte[bytesToCopy]; + obuf.get(b); + getWbuf().put(b); + getLogger().debug("After copying stuff from %s: %s", + o, getWbuf()); + if(!o.getBuffer().hasRemaining()) { + o.writeComplete(); + transitionWriteItem(); + + preparePending(); + if(shouldOptimize) { + optimize(); + } + + o=getCurrentWriteOp(); + } + toWrite += bytesToCopy; + } + getWbuf().flip(); + assert toWrite <= getWbuf().capacity() + : "toWrite exceeded capacity: " + this; + assert toWrite == getWbuf().remaining() + : "Expected " + toWrite + " remaining, got " + + getWbuf().remaining(); + } else { + getLogger().debug("Buffer is full, skipping"); + } + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#transitionWriteItem() + */ + public final void transitionWriteItem() { + Operation op=removeCurrentWriteOp(); + assert op != null : "There is no write item to transition"; + getLogger().debug("Finished writing %s", 
op); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#optimize() + */ + protected abstract void optimize(); + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getCurrentReadOp() + */ + public final Operation getCurrentReadOp() { + return readQ.peek(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#removeCurrentReadOp() + */ + public final Operation removeCurrentReadOp() { + return readQ.remove(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getCurrentWriteOp() + */ + public final Operation getCurrentWriteOp() { + return optimizedOp == null ? writeQ.peek() : optimizedOp; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#removeCurrentWriteOp() + */ + public final Operation removeCurrentWriteOp() { + Operation rv=optimizedOp; + if(rv == null) { + rv=writeQ.remove(); + } else { + optimizedOp=null; + } + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#hasReadOp() + */ + public final boolean hasReadOp() { + return !readQ.isEmpty(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#hasWriteOp() + */ + public final boolean hasWriteOp() { + return !(optimizedOp == null && writeQ.isEmpty()); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#addOp(net.spy.memcached.ops.Operation) + */ + public final void addOp(Operation op) { + try { + if (!authLatch.await(1, TimeUnit.SECONDS)) { + op.cancel(); + getLogger().warn( + "Operation canceled because authentication " + + "or reconnection and authentication has " + + "taken more than one second to complete."); + getLogger().debug("Canceled operation %s", op.toString()); + return; + } + if(!inputQueue.offer(op, opQueueMaxBlockTime, + TimeUnit.MILLISECONDS)) { + throw new IllegalStateException("Timed out waiting to add " + + op + "(max wait=" + opQueueMaxBlockTime + "ms)"); + } + } catch(InterruptedException e) { + // Restore the interrupted status + Thread.currentThread().interrupt(); + throw 
new IllegalStateException("Interrupted while waiting to add " + + op); + } + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#insertOp(net.spy.memcached.ops.Operation) + */ + public final void insertOp(Operation op) { + ArrayList tmp = new ArrayList( + inputQueue.size() + 1); + tmp.add(op); + inputQueue.drainTo(tmp); + inputQueue.addAll(tmp); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getSelectionOps() + */ + public final int getSelectionOps() { + int rv=0; + if(getChannel().isConnected()) { + if(hasReadOp()) { + rv |= SelectionKey.OP_READ; + } + if(toWrite > 0 || hasWriteOp()) { + rv |= SelectionKey.OP_WRITE; + } + } else { + rv = SelectionKey.OP_CONNECT; + } + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getRbuf() + */ + public final ByteBuffer getRbuf() { + return rbuf; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getWbuf() + */ + public final ByteBuffer getWbuf() { + return wbuf; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getSocketAddress() + */ + public final SocketAddress getSocketAddress() { + return socketAddress; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#isActive() + */ + public final boolean isActive() { + return !isFake && reconnectAttempt == 0 + && getChannel() != null && getChannel().isConnected(); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#reconnecting() + */ + public final void reconnecting() { + reconnectAttempt++; + continuousTimeout.set(0); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#connected() + */ + public final void connected() { + reconnectAttempt=0; + continuousTimeout.set(0); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getReconnectCount() + */ + public final int getReconnectCount() { + return reconnectAttempt; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#toString() + */ + @Override + public final String toString() { + 
int sops=0; + if(getSk()!= null && getSk().isValid()) { + sops=getSk().interestOps(); + } + int rsize=readQ.size() + (optimizedOp == null ? 0 : 1); + int wsize=writeQ.size(); + int isize=inputQueue.size(); + return "{QA sa=" + getSocketAddress() + ", #Rops=" + rsize + + ", #Wops=" + wsize + + ", #iq=" + isize + + ", topRop=" + getCurrentReadOp() + + ", topWop=" + getCurrentWriteOp() + + ", toWrite=" + toWrite + + ", interested=" + sops + "}"; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#registerChannel(java.nio.channels.SocketChannel, java.nio.channels.SelectionKey) + */ + public final void registerChannel(SocketChannel ch, SelectionKey skey) { + setChannel(ch); + setSk(skey); + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#setChannel(java.nio.channels.SocketChannel) + */ + public final void setChannel(SocketChannel to) { + assert channel == null || !channel.isOpen() + : "Attempting to overwrite channel"; + channel = to; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getChannel() + */ + public final SocketChannel getChannel() { + return channel; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#setSk(java.nio.channels.SelectionKey) + */ + public final void setSk(SelectionKey to) { + sk = to; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getSk() + */ + public final SelectionKey getSk() { + return sk; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getBytesRemainingInBuffer() + */ + public final int getBytesRemainingToWrite() { + return toWrite; + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#writeSome() + */ + public final int writeSome() throws IOException { + int wrote=channel.write(wbuf); + assert wrote >= 0 : "Wrote negative bytes?"; + toWrite -= wrote; + assert toWrite >= 0 + : "toWrite went negative after writing " + wrote + + " bytes for " + this; + getLogger().debug("Wrote %d bytes", wrote); + return wrote; + } + + + /* 
(non-Javadoc) + * @see net.spy.memcached.MemcachedNode#setContinuousTimeout + */ + public void setContinuousTimeout(boolean timedOut) { + if (timedOut && isActive()) { + continuousTimeout.incrementAndGet(); + } else { + continuousTimeout.set(0); + } + } + + /* (non-Javadoc) + * @see net.spy.memcached.MemcachedNode#getContinuousTimeout + */ + public int getContinuousTimeout() { + return continuousTimeout.get(); + } + + + public final void fixupOps() { + // As the selection key can be changed at any point due to node + // failure, we'll grab the current volatile value and configure it. + SelectionKey s = sk; + if(s != null && s.isValid()) { + int iops=getSelectionOps(); + getLogger().debug("Setting interested opts to %d", iops); + s.interestOps(iops); + } else { + getLogger().debug("Selection key is not valid."); + } + } + + public final void authComplete() { + if (reconnectBlocked != null && reconnectBlocked.size() > 0 ) { + inputQueue.addAll(reconnectBlocked); + } + authLatch.countDown(); + } + + public final void setupForAuth() { + if (shouldAuth) { + authLatch = new CountDownLatch(1); + if (inputQueue.size() > 0) { + reconnectBlocked = new ArrayList( + inputQueue.size() + 1); + inputQueue.drainTo(reconnectBlocked); + } + assert(inputQueue.size() == 0); + setupResend(); + } else { + authLatch = new CountDownLatch(0); + } + } + + public final void shutdown() throws IOException { + if(channel != null) { + channel.close(); + sk = null; + if(toWrite > 0) { + getLogger().warn( + "Shut down with %d bytes remaining to write", + toWrite); + } + getLogger().debug("Shut down channel %s", channel); + } + } + + public int getInputQueueSize() { + return inputQueue.size(); + } + + public int getWriteQueueSize() { + return writeQ.size(); + } + + public int getReadQueueSize() { + return readQ.size(); + } + + @Override + public String getStatus() { + StringBuilder sb = new StringBuilder(); + sb.append("#iq=").append(getInputQueueSize()); + sb.append(" 
#Wops=").append(getWriteQueueSize()); + sb.append(" #Rops=").append(getReadQueueSize()); + sb.append(" #CT=").append(getContinuousTimeout()); + return sb.toString(); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/AsciiMemcachedNodeImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/AsciiMemcachedNodeImpl.java new file mode 100644 index 000000000..deb9ea297 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/AsciiMemcachedNodeImpl.java @@ -0,0 +1,52 @@ +package net.spy.memcached.protocol.ascii; + +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.util.concurrent.BlockingQueue; + +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.protocol.ProxyCallback; +import net.spy.memcached.protocol.TCPMemcachedNodeImpl; + +/** + * Memcached node for the ASCII protocol. + */ +public final class AsciiMemcachedNodeImpl extends TCPMemcachedNodeImpl { + + public AsciiMemcachedNodeImpl(SocketAddress sa, SocketChannel c, + int bufSize, BlockingQueue rq, + BlockingQueue wq, BlockingQueue iq, Long opQueueMaxBlockTimeNs) { + super(sa, c, bufSize, rq, wq, iq, opQueueMaxBlockTimeNs, false); /* ascii never does auth */ + } + + @Override + protected void optimize() { + // make sure there are at least two get operations in a row before + // attempting to optimize them. 
+ if(writeQ.peek() instanceof GetOperation) { + optimizedOp=writeQ.remove(); + if(writeQ.peek() instanceof GetOperation) { + OptimizedGetImpl og=new OptimizedGetImpl( + (GetOperation)optimizedOp); + optimizedOp=og; + + while(writeQ.peek() instanceof GetOperation) { + GetOperationImpl o=(GetOperationImpl) writeQ.remove(); + if(!o.isCancelled()) { + og.addOperation(o); + } + } + + // Initialize the new mega get + optimizedOp.initialize(); + assert optimizedOp.getState() == OperationState.WRITING; + ProxyCallback pcb=(ProxyCallback) og.getCallback(); + getLogger().debug("Set up %s with %s keys and %s callbacks", + this, pcb.numKeys(), pcb.numCallbacks()); + } + } + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/AsciiOperationFactory.java b/src/main/java/net/spy/memcached/protocol/ascii/AsciiOperationFactory.java new file mode 100644 index 000000000..e8e93da2b --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/AsciiOperationFactory.java @@ -0,0 +1,295 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BTreeFindPosition; +import net.spy.memcached.collection.BTreeGetBulk; +import net.spy.memcached.collection.BTreeGetByPosition; +import net.spy.memcached.collection.BTreeSMGet; +import net.spy.memcached.collection.BTreeStoreAndGet; +import net.spy.memcached.collection.CollectionBulkStore; +import net.spy.memcached.collection.CollectionCount; +import net.spy.memcached.collection.CollectionCreate; +import net.spy.memcached.collection.CollectionDelete; +import net.spy.memcached.collection.CollectionExist; +import net.spy.memcached.collection.CollectionGet; +import net.spy.memcached.collection.CollectionMutate; +import net.spy.memcached.collection.CollectionPipedStore; +import net.spy.memcached.collection.CollectionPipedUpdate; +import net.spy.memcached.collection.CollectionStore; +import net.spy.memcached.collection.CollectionUpdate; +import net.spy.memcached.collection.SetPipedExist; +import net.spy.memcached.ops.BTreeFindPositionOperation; +import net.spy.memcached.ops.BTreeGetBulkOperation; +import net.spy.memcached.ops.BTreeGetByPositionOperation; +import net.spy.memcached.ops.BTreeSortMergeGetOperation; +import net.spy.memcached.ops.BTreeStoreAndGetOperation; +import net.spy.memcached.ops.BaseOperationFactory; +import net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.CollectionBulkStoreOperation; +import net.spy.memcached.ops.CollectionCountOperation; +import net.spy.memcached.ops.CollectionCreateOperation; +import net.spy.memcached.ops.CollectionDeleteOperation; +import net.spy.memcached.ops.CollectionExistOperation; +import net.spy.memcached.ops.CollectionGetOperation; +import net.spy.memcached.ops.CollectionMutateOperation; +import 
net.spy.memcached.ops.CollectionPipedExistOperation; +import net.spy.memcached.ops.CollectionPipedStoreOperation; +import net.spy.memcached.ops.CollectionPipedUpdateOperation; +import net.spy.memcached.ops.CollectionStoreOperation; +import net.spy.memcached.ops.CollectionUpdateOperation; +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.ExtendedBTreeGetOperation; +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.GetAttrOperation; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.KeyedOperation; +import net.spy.memcached.ops.MultiGetOperationCallback; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.NoopOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.SASLAuthOperation; +import net.spy.memcached.ops.SASLMechsOperation; +import net.spy.memcached.ops.SASLStepOperation; +import net.spy.memcached.ops.SetAttrOperation; +import net.spy.memcached.ops.StatsOperation; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; +import net.spy.memcached.ops.VersionOperation; + +/** + * Operation factory for the ascii protocol. 
+ */ +public class AsciiOperationFactory extends BaseOperationFactory { + + public DeleteOperation delete(String key, OperationCallback cb) { + return new DeleteOperationImpl(key, cb); + } + + public FlushOperation flush(int delay, OperationCallback cb) { + return new FlushOperationImpl(delay, cb); + } + + public GetOperation get(String key, GetOperation.Callback cb) { + return new GetOperationImpl(key, cb); + } + + public GetOperation get(Collection keys, GetOperation.Callback cb) { + return new GetOperationImpl(keys, cb); + } + + public GetsOperation gets(String key, GetsOperation.Callback cb) { + return new GetsOperationImpl(key, cb); + } + + public MutatorOperation mutate(Mutator m, String key, int by, + long def, int exp, OperationCallback cb) { + return new MutatorOperationImpl(m, key, by, def, exp, cb); + } + + public StatsOperation stats(String arg, StatsOperation.Callback cb) { + return new StatsOperationImpl(arg, cb); + } + + public StoreOperation store(StoreType storeType, String key, int flags, + int exp, byte[] data, OperationCallback cb) { + return new StoreOperationImpl(storeType, key, flags, exp, data, cb); + } + + public VersionOperation version(OperationCallback cb) { + return new VersionOperationImpl(cb); + } + + public NoopOperation noop(OperationCallback cb) { + return new VersionOperationImpl(cb); + } + + public CASOperation cas(StoreType type, String key, long casId, int flags, + int exp, byte[] data, OperationCallback cb) { + return new CASOperationImpl(key, casId, flags, exp, data, cb); + } + + public ConcatenationOperation cat(ConcatenationType catType, + long casId, + String key, byte[] data, OperationCallback cb) { + return new ConcatenationOperationImpl(catType, key, data, cb); + } + + @Override + protected Collection cloneGet(KeyedOperation op) { + Collection rv=new ArrayList(); + GetOperation.Callback callback = new MultiGetOperationCallback( + op.getCallback(), op.getKeys().size()); + for(String k : op.getKeys()) { + rv.add(get(k, 
callback)); + } + return rv; + } + + public SASLMechsOperation saslMechs(OperationCallback cb) { + throw new UnsupportedOperationException(); + } + + public SASLStepOperation saslStep(String[] mech, byte[] challenge, + String serverName, Map props, CallbackHandler cbh, + OperationCallback cb) { + throw new UnsupportedOperationException(); + } + + public SASLAuthOperation saslAuth(String[] mech, String serverName, + Map props, CallbackHandler cbh, OperationCallback cb) { + throw new UnsupportedOperationException(); + } + + public SetAttrOperation setAttr(String key, Attributes attrs, + OperationCallback cb) { + return new SetAttrOperationImpl(key, attrs, cb); + } + + public GetAttrOperation getAttr(String key, GetAttrOperation.Callback cb) { + return new GetAttrOperationImpl(key, cb); + } + + public CollectionStoreOperation collectionStore(String key, String subkey, + CollectionStore collectionStore, byte[] data, OperationCallback cb) { + return new CollectionStoreOperationImpl(key, subkey, + collectionStore, data, cb); + } + + public CollectionPipedStoreOperation collectionPipedStore(String key, + CollectionPipedStore store, OperationCallback cb) { + return new CollectionPipedStoreOperationImpl(key, store, cb); + } + + public CollectionGetOperation collectionGet(String key, + CollectionGet collectionGet, CollectionGetOperation.Callback cb) { + return new CollectionGetOperationImpl(key, collectionGet, cb); + } + + public CollectionGetOperation collectionGet2(String key, + CollectionGet collectionGet, ExtendedBTreeGetOperation.Callback cb) { + return new ExtendedBTreeGetOperationImpl(key, collectionGet, cb); + } + + public CollectionDeleteOperation collectionDelete(String key, + CollectionDelete collectionDelete, OperationCallback cb) { + return new CollectionDeleteOperationImpl(key, collectionDelete, cb); + } + + public CollectionExistOperation collectionExist(String key, String subkey, + CollectionExist collectionExist, OperationCallback cb) { + return new 
CollectionExistOperationImpl(key, subkey, collectionExist, cb); + } + + public CollectionCreateOperation collectionCreate(String key, + CollectionCreate collectionCreate, OperationCallback cb) { + return new CollectionCreateOperationImpl(key, collectionCreate, cb); + } + + public CollectionCountOperation collectionCount(String key, + CollectionCount collectionCount, OperationCallback cb) { + return new CollectionCountOperationImpl(key, collectionCount, cb); + } + + public FlushOperation flush(String prefix, int delay, boolean noreply, OperationCallback cb) { + return new FlushByPrefixOperationImpl(prefix, delay, noreply, cb); + } + + public BTreeSortMergeGetOperation bopsmget(BTreeSMGet smGet, + BTreeSortMergeGetOperation.Callback cb) { + return new BTreeSortMergeGetOperationImpl(smGet, cb); + } + + @Override + public CollectionStoreOperation collectionUpsert(String key, String subkey, + CollectionStore collectionStore, byte[] data, + OperationCallback cb) { + return new CollectionUpsertOperationImpl(key, subkey, collectionStore, + data, cb); + } + + @Override + public CollectionUpdateOperation collectionUpdate(String key, + String subkey, CollectionUpdate collectionUpdate, byte[] data, + OperationCallback cb) { + return new CollectionUpdateOperationImpl(key, subkey, collectionUpdate, + data, cb); + } + + @Override + public CollectionPipedUpdateOperation collectionPipedUpdate(String key, + CollectionPipedUpdate update, OperationCallback cb) { + return new CollectionPipedUpdateOperationImpl(key, update, cb); + } + + @Override + public CollectionPipedExistOperation collectionPipedExist(String key, + SetPipedExist exist, OperationCallback cb) { + return new CollectionPipedExistOperationImpl(key, exist, cb); + } + + @Override + public CollectionBulkStoreOperation collectionBulkStore(List key, + CollectionBulkStore store, OperationCallback cb) { + return new CollectionBulkStoreOperationImpl(key, store, cb); + } + + @Override + public BTreeGetBulkOperation 
bopGetBulk(BTreeGetBulk getBulk, + BTreeGetBulkOperation.Callback cb) { + return new BTreeGetBulkOperationImpl(getBulk, cb); + } + + @Override + public CollectionMutateOperation collectionMutate(String key, + String subkey, CollectionMutate collectionMutate, + OperationCallback cb) { + return new CollectionMutateOperationImpl(key, subkey, collectionMutate, cb); + } + + @Override + public BTreeGetByPositionOperation bopGetByPosition(String key, + BTreeGetByPosition get, OperationCallback cb) { + return new BTreeGetByPositionOperationImpl(key, get, cb); + } + + @Override + public BTreeFindPositionOperation bopFindPosition(String key, + BTreeFindPosition get, OperationCallback cb) { + return new BTreeFindPositionOperationImpl(key, get, cb); + } + + @Override + public BTreeStoreAndGetOperation bopStoreAndGet(String key, + BTreeStoreAndGet get, byte[] dataToStore, OperationCallback cb) { + return new BTreeStoreAndGetOperationImpl(key, get, dataToStore, cb); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/BTreeFindPositionOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/BTreeFindPositionOperationImpl.java new file mode 100644 index 000000000..0f3ba5798 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/BTreeFindPositionOperationImpl.java @@ -0,0 +1,134 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.BTreeFindPosition; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.BTreeFindPositionOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +public class BTreeFindPositionOperationImpl extends OperationImpl implements + BTreeFindPositionOperation { + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus POSITION = new CollectionOperationStatus( + true, "POSITION", CollectionResponse.OK); // OK is arbitrary response + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + + protected final String key; + protected final BTreeFindPosition get; + + protected byte[] data = null; + protected int readOffset = 0; + protected byte lookingFor = '\0'; + protected int spaceCount = 0; + + public BTreeFindPositionOperationImpl(String key, BTreeFindPosition get, + OperationCallback cb) { + super(cb); + this.key = key; + 
this.get = get; + } + + @Override + public BTreeFindPosition getGet() { + return get; + } + + @Override + public void handleLine(String line) { + if (getLogger().isDebugEnabled()) { + getLogger().debug("Got line %s", line); + } + + Integer position = null; + + if (line.startsWith("POSITION=")) { + String[] stuff = line.split("="); + assert stuff.length == 2; + assert "POSITION".equals(stuff[0]); + + // FIXME exception-based conversion. + try { + // expected : POSITION= : 0 or positive integer + position = Integer.parseInt(stuff[1]); + BTreeFindPositionOperation.Callback cb = (BTreeFindPositionOperation.Callback) getCallback(); + cb.gotData(position); + getCallback().receivedStatus(POSITION); + } catch (Exception e) { + // expected : + } + } else { + OperationStatus status = matchStatus(line, NOT_FOUND, UNREADABLE, + BKEY_MISMATCH, TYPE_MISMATCH, NOT_FOUND_ELEMENT); + if (getLogger().isDebugEnabled()) { + getLogger().debug(status); + } + getCallback().receivedStatus(status); + } + + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + String cmd = get.getCommand(); + String args = get.stringify(); + + ByteBuffer bb = ByteBuffer.allocate(cmd.length() + key.length() + + args.length() + 16); + + setArguments(bb, cmd, key, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetBulkOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetBulkOperationImpl.java new file mode 100644 index 000000000..2b7a31b31 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetBulkOperationImpl.java @@ -0,0 +1,251 @@ +/* + * 
arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.util.Collection; + +import net.spy.memcached.collection.BTreeGetBulk; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.BTreeGetBulkOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to retrieve b+tree data with multiple keys + */ +public class BTreeGetBulkOperationImpl extends OperationImpl implements + BTreeGetBulkOperation { + + private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream(); + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + + private static final OperationStatus OK = new CollectionOperationStatus( + true, "OK", CollectionResponse.OK); + private static final OperationStatus TRIMMED = new CollectionOperationStatus( + true, "TRIMMED", CollectionResponse.TRIMMED); + + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + 
false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus( + false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + + protected final BTreeGetBulk getBulk; + + protected int flags = 0; + protected byte[] data = null; + protected int readOffset = 0; + protected byte lookingFor = '\0'; + protected int spaceCount = 0; + + public BTreeGetBulkOperationImpl(BTreeGetBulk getBulk, OperationCallback cb) { + super(cb); + this.getBulk = getBulk; + } + + public void handleLine(String line) { + getLogger().debug("Got line %s", line); + + if (line.startsWith("VALUE ")) { + readKey(line); + setReadType(OperationReadType.DATA); + } else { + OperationStatus status = matchStatus(line, END); + + getLogger().debug(status); + getCallback().receivedStatus(status); + + transitionState(OperationState.COMPLETE); + return; + } + } + + @Override + public final void handleRead(ByteBuffer bb) { + readValue(bb); + } + + private final void readKey(String line) { + // protocol : VALUE key OK flag count + String[] chunk = line.split(" "); + + OperationStatus status = matchStatus(chunk[2], OK, TRIMMED, NOT_FOUND, + NOT_FOUND_ELEMENT, OUT_OF_RANGE, TYPE_MISMATCH, BKEY_MISMATCH, + UNREADABLE); + + getBulk.decodeKeyHeader(line); + + BTreeGetBulkOperation.Callback cb = ((BTreeGetBulkOperation.Callback) getCallback()); + cb.gotKey(chunk[1], 
(chunk.length > 3) ? Integer.valueOf(chunk[4]) : -1, status); + } + + private final void readValue(ByteBuffer bb) { + // protocol : ELEMENT bkey [eflag] len value + if (lookingFor == '\0' && data == null) { + for (int i = 0; bb.remaining() > 0; i++) { + byte b = bb.get(); + + // Handle spaces. + if (b == ' ') { + spaceCount++; + + String l = new String(byteBuffer.toByteArray()); + + if (l.startsWith("ELEMENT")) { + if (getBulk.elementHeaderReady(spaceCount)) { + if (spaceCount == 3 && l.split(" ")[2].startsWith("0x")) { + byteBuffer.write(b); + continue; + } + + getBulk.decodeItemHeader(l); + data = new byte[getBulk.getDataLength()]; + byteBuffer.reset(); + spaceCount = 0; + break; + } + } + } + + // Ready to finish. + if (b == '\r') { + continue; + } + + // Finish the operation. + if (b == '\n') { + String line = byteBuffer.toString(); + + if (line.startsWith("VALUE")) { + readKey(line); + byteBuffer.reset(); + spaceCount = 0; + continue; + } else { + OperationStatus status = matchStatus(line, END); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + data = null; + break; + } + } + byteBuffer.write(b); + } + return; + } + + // Read data + // assert key != null; + assert data != null; + + // This will be the case, because we'll clear them when it's not. 
+ assert readOffset <= data.length : "readOffset is " + readOffset + + " data.length is " + data.length; + + getLogger() + .debug("readOffset: %d, length: %d", readOffset, data.length); + + if (lookingFor == '\0') { + int toRead = data.length - readOffset; + int available = bb.remaining(); + toRead = Math.min(toRead, available); + + getLogger().debug("Reading %d bytes", toRead); + + bb.get(data, readOffset, toRead); + readOffset += toRead; + } + + if (lookingFor == '\0' && readOffset == data.length) { + BTreeGetBulkOperation.Callback cb = (BTreeGetBulkOperation.Callback) getCallback(); + cb.gotElement(getBulk.getKey(), getBulk.getSubkey(), getBulk.getFlag(), getBulk.getEFlag(), data); + lookingFor = '\r'; + } + + if (lookingFor != '\0' && bb.hasRemaining()) { + do { + byte tmp = bb.get(); + assert tmp == lookingFor : "Expecting " + lookingFor + ", got " + + (char) tmp; + + switch (lookingFor) { + case '\r': + lookingFor = '\n'; + break; + case '\n': + lookingFor = '\0'; + break; + default: + assert false : "Looking for unexpected char: " + + (char) lookingFor; + } + } while (lookingFor != '\0' && bb.hasRemaining()); + + if (lookingFor == '\0') { + data = null; + readOffset = 0; + } + } + } + + public void initialize() { + String cmd = getBulk.getCommand(); + String args = getBulk.stringify(); + + ByteBuffer bb = ByteBuffer.allocate(cmd.length() + args.length() + + getBulk.getCommaSeparatedKeys().length() + 16); + + setArguments(bb, cmd, args); + + setArguments(bb, getBulk.getCommaSeparatedKeys()); + + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return getBulk.getKeyList(); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetByPositionOperationImpl.java 
b/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetByPositionOperationImpl.java new file mode 100644 index 000000000..a6e8acf35 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/BTreeGetByPositionOperationImpl.java @@ -0,0 +1,261 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.BTreeGetByPosition; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.BTreeGetByPositionOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +public class BTreeGetByPositionOperationImpl extends OperationImpl implements + BTreeGetByPositionOperation { + + private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream(); + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus NOT_FOUND = new 
CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + + protected final String key; + protected final BTreeGetByPosition get; + + protected int flags = 0; + protected int count = 0; + protected int pos = 0; + protected int posDiff = 0; + protected byte[] data = null; + protected int readOffset = 0; + protected byte lookingFor = '\0'; + protected int spaceCount = 0; + + private Boolean hasEFlag = null; + + public BTreeGetByPositionOperationImpl(String key, + BTreeGetByPosition get, OperationCallback cb) { + super(cb); + this.key = key; + this.get = get; + } + + @Override + public BTreeGetByPosition getGet() { + return get; + } + + @Override + public void handleLine(String line) { + if (getLogger().isDebugEnabled()) { + getLogger().debug("Got line %s", line); + } + + // VALUE \r\n + if (line.startsWith("VALUE ")) { + String[] stuff = line.split(" "); + assert stuff.length == 3; + assert "VALUE".equals(stuff[0]); + + flags = Integer.parseInt(stuff[1]); + count = Integer.parseInt(stuff[2]); + + if (count > 0) { + // position counter + pos = get.isReversed() ? get.getPosTo() + count - 1 : get.getPosFrom(); + posDiff = get.isReversed() ? 
-1 : 1; + + // start to read actual data + setReadType(OperationReadType.DATA); + } + } else { + OperationStatus status = matchStatus(line, END, NOT_FOUND, + UNREADABLE, TYPE_MISMATCH, NOT_FOUND_ELEMENT); + if (getLogger().isDebugEnabled()) { + getLogger().debug(status); + } + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + return; + } + } + + @Override + public void handleRead(ByteBuffer bb) { + // Decode a data header. + if (lookingFor == '\0' && data == null) { + for (int i = 0; bb.remaining() > 0; i++) { + byte b = bb.get(); + // Handle spaces to parse the header. + if (b == ' ') { + // One-time check to find if this responses have eflags. + if (hasEFlag == null && spaceCount == BTreeGetByPosition.HEADER_EFLAG_POSITION + 1) { + String[] chunk = new String(byteBuffer.toByteArray()) + .split(" "); + if (chunk[BTreeGetByPosition.HEADER_EFLAG_POSITION].startsWith("0x")) { + hasEFlag = true; + } else { + hasEFlag = false; + } + } + + spaceCount++; + + // Parse the value header. + // FIXME this is not cool... please fix this :-( + int spaceReduced = (hasEFlag != null && hasEFlag) ? 1 : 0; + if (get.headerReady(spaceCount - spaceReduced)) { + get.decodeItemHeader(new String(byteBuffer.toByteArray())); + data = new byte[get.getDataLength()]; + byteBuffer.reset(); + spaceCount = 0; + hasEFlag = null; + break; + } + } + + // Ready to finish. + if (b == '\r') { + continue; + } + + // Finish the operation. 
+ if (b == '\n') { + OperationStatus status = matchStatus(byteBuffer.toString(), + END, NOT_FOUND, UNREADABLE, TYPE_MISMATCH, + NOT_FOUND_ELEMENT); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Get complete!"); + } + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + data = null; + break; + } + + // Write to the result ByteBuffer + byteBuffer.write(b); + } + return; + } + + // Read data + assert key != null; + assert data != null; + // This will be the case, because we'll clear them when it's not. + assert readOffset <= data.length + : "readOffset is " + readOffset + " data.length is " + data.length; + + if (getLogger().isDebugEnabled()) { + getLogger().debug("readOffset: %d, length: %d", readOffset, data.length); + } + + if (lookingFor == '\0') { + int toRead = data.length - readOffset; + int available = bb.remaining(); + toRead = Math.min(toRead, available); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Reading %d bytes", toRead); + } + + bb.get(data, readOffset, toRead); + readOffset += toRead; + } + + if (lookingFor == '\0' && readOffset == data.length) { + // put an element data. + BTreeGetByPositionOperation.Callback cb = (BTreeGetByPositionOperation.Callback) getCallback(); + cb.gotData(key, flags, pos, get.getBkey(), get.getEflag(), data); + + // next position. 
+ pos += posDiff; + lookingFor = '\r'; + } + + if (lookingFor != '\0' && bb.hasRemaining()) { + do { + byte tmp = bb.get(); + assert tmp == lookingFor : "Expecting " + lookingFor + ", got " + + (char) tmp; + + switch (lookingFor) { + case '\r': + lookingFor = '\n'; + break; + case '\n': + lookingFor = '\0'; + break; + default: + assert false : "Looking for unexpected char: " + + (char) lookingFor; + } + } while (lookingFor != '\0' && bb.hasRemaining()); + + if (lookingFor == '\0') { + data = null; + readOffset = 0; + } + } + } + + @Override + public void initialize() { + String cmd = get.getCommand(); + String args = get.stringify(); + + ByteBuffer bb = ByteBuffer.allocate(cmd.length() + key.length() + + args.length() + 16); + + setArguments(bb, cmd, key, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/BTreeSortMergeGetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/BTreeSortMergeGetOperationImpl.java new file mode 100644 index 000000000..3a211e8f4 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/BTreeSortMergeGetOperationImpl.java @@ -0,0 +1,305 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.util.Collection; + +import net.spy.memcached.collection.BTreeSMGet; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.BTreeSortMergeGetOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to retrieve b+tree data with multiple keys + */ +public class BTreeSortMergeGetOperationImpl extends OperationImpl implements + BTreeSortMergeGetOperation { + + private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream(); + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus DUPLICATED = new CollectionOperationStatus( + true, "DUPLICATED", CollectionResponse.DUPLICATED); + private static final OperationStatus TRIMMED = new CollectionOperationStatus( + true, "TRIMMED", CollectionResponse.TRIMMED); + private static final OperationStatus DUPLICATED_TRIMMED = new CollectionOperationStatus( + true, "DUPLICATED_TRIMMED", CollectionResponse.DUPLICATED_TRIMMED); + private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus( + false, 
"OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE); + private static final OperationStatus ATTR_MISMATCH = new CollectionOperationStatus( + false, "ATTR_MISMATCH", CollectionResponse.ATTR_MISMATCH); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + + protected final BTreeSMGet smGet; + + protected int flags = 0; + protected int count = 0; + protected byte[] data = null; + protected int readOffset = 0; + protected byte lookingFor = '\0'; + protected int spaceCount = 0; + + protected int readState = 0; // 0 : value, 1 : missed keys + private int processedValueCount = 0; + + public BTreeSortMergeGetOperationImpl(BTreeSMGet smGet, + OperationCallback cb) { + super(cb); + this.smGet = smGet; + } + + /** + * VALUE \r\n + */ + public void handleLine(String line) { + // Response header + getLogger().debug("Got line %s", line); + + if (line.startsWith("VALUE ")) { + readState = 0; + + String[] stuff = line.split(" "); + assert "VALUE".equals(stuff[0]); + + count = Integer.parseInt(stuff[1]); + + if (count > 0) { + setReadType(OperationReadType.DATA); + } + } else if (line.startsWith("MISSED_KEYS")) { + readState = 1; + + String[] stuff = line.split(" "); + assert "MISSED_KEYS".equals(stuff[0]); + + count = Integer.parseInt(stuff[1]); + + if (count > 0) { + setReadType(OperationReadType.DATA); + } + } else { + OperationStatus status = matchStatus(line, END, TRIMMED, + DUPLICATED, DUPLICATED_TRIMMED, OUT_OF_RANGE, + ATTR_MISMATCH, TYPE_MISMATCH, BKEY_MISMATCH); + getLogger().debug(status); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + return; + } + } + + @Override + public final void handleRead(ByteBuffer bb) { + if (readState == 0) { + readValue(bb); + } else { + readMissedKeys(bb); + } + } + + 
private final void readValue(ByteBuffer bb) { + // Decode a collection data header. + if (lookingFor == '\0' && data == null) { + for (int i = 0; bb.remaining() > 0; i++) { + byte b = bb.get(); + + // Handle spaces. + if (b == ' ') { + + // Adjust space count if item header has a element flag. + String[] chunk = new String(byteBuffer.toByteArray()) + .split(" "); + if (chunk.length == smGet.headerCount) { + if (chunk[3].startsWith("0x")) { + spaceCount--; + } + } + + spaceCount++; + if (smGet.headerReady(spaceCount)) { + smGet.decodeItemHeader(new String(byteBuffer + .toByteArray())); + data = new byte[smGet.getDataLength()]; + byteBuffer.reset(); + spaceCount = 0; + processedValueCount++; + break; + } + } + + // Ready to finish. + if (b == '\r') { + continue; + } + + // Finish the operation. + if (b == '\n') { + + if ((new String(byteBuffer.toByteArray())) + .startsWith("MISSED_KEYS")) { + readState = 1; + byteBuffer.reset(); + spaceCount = 0; + return; + } + + OperationStatus status = matchStatus(byteBuffer.toString(), + END, TRIMMED, DUPLICATED, DUPLICATED_TRIMMED, + OUT_OF_RANGE, ATTR_MISMATCH, TYPE_MISMATCH, + BKEY_MISMATCH); + + getCallback().receivedStatus(status); + //transitionState(OperationState.COMPLETE); + data = null; + break; + } + + byteBuffer.write(b); + } + return; + } + + // Read data + // assert key != null; + assert data != null; + + // This will be the case, because we'll clear them when it's not. 
+ assert readOffset <= data.length : "readOffset is " + readOffset + + " data.length is " + data.length; + + getLogger() + .debug("readOffset: %d, length: %d", readOffset, data.length); + + if (lookingFor == '\0') { + int toRead = data.length - readOffset; + int available = bb.remaining(); + toRead = Math.min(toRead, available); + + getLogger().debug("Reading %d bytes", toRead); + + bb.get(data, readOffset, toRead); + readOffset += toRead; + } + + if (lookingFor == '\0' && readOffset == data.length) { + BTreeSortMergeGetOperation.Callback cb = (BTreeSortMergeGetOperation.Callback) getCallback(); + cb.gotData(smGet.getKey(), smGet.getSubkey(), smGet.getFlag(), data); + lookingFor = '\r'; + } + + if (lookingFor != '\0' && bb.hasRemaining()) { + do { + byte tmp = bb.get(); + assert tmp == lookingFor : "Expecting " + lookingFor + ", got " + + (char) tmp; + + switch (lookingFor) { + case '\r': + lookingFor = '\n'; + break; + case '\n': + lookingFor = '\0'; + break; + default: + assert false : "Looking for unexpected char: " + + (char) lookingFor; + } + } while (lookingFor != '\0' && bb.hasRemaining()); + + if (lookingFor == '\0') { + data = null; + readOffset = 0; + } + } + } + + private final void readMissedKeys(ByteBuffer bb) { + if (lookingFor == '\0' && data == null) { + for (int i = 0; bb.remaining() > 0; i++) { + byte b = bb.get(); + + // Ready to finish. + if (b == '\r') { + continue; + } + + // Finish the operation. 
+ if (b == '\n') { + OperationStatus status = matchStatus(byteBuffer.toString(), + END, TRIMMED, DUPLICATED, DUPLICATED_TRIMMED, + OUT_OF_RANGE, ATTR_MISMATCH, TYPE_MISMATCH, + BKEY_MISMATCH); + + if (status.isSuccess()) { + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + return; + } else { + ((BTreeSortMergeGetOperation.Callback) getCallback()) + .gotMissedKey(byteBuffer.toByteArray()); + } + byteBuffer.reset(); + } else + byteBuffer.write(b); + } + return; + } + } + + public void initialize() { + String cmd = smGet.getCommand(); + String args = smGet.stringify(); + + ByteBuffer bb = ByteBuffer.allocate(cmd.length() + args.length() + + smGet.getCommaSeparatedKeys().length() + 16); + + setArguments(bb, cmd, args); + + setArguments(bb, smGet.getCommaSeparatedKeys()); + + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return smGet.getKeyList(); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/BTreeStoreAndGetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/BTreeStoreAndGetOperationImpl.java new file mode 100644 index 000000000..3b510abe4 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/BTreeStoreAndGetOperationImpl.java @@ -0,0 +1,295 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
package net.spy.memcached.protocol.ascii;

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.collection.BTreeGetByPosition;
import net.spy.memcached.collection.BTreeStoreAndGet;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.ops.BTreeStoreAndGetOperation;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation that inserts (or upserts) an element into a b+tree and, in the
 * same round trip, reads back an element (e.g. one trimmed by the insert).
 * The store outcome arrives as a status line; an optional "VALUE" section
 * carries the returned element, parsed byte-by-byte in handleRead.
 */
public class BTreeStoreAndGetOperationImpl extends OperationImpl implements
        BTreeStoreAndGetOperation {

    // Slack added to the request buffer for separators, digits, and CRLF.
    private static final int OVERHEAD = 32;

    // Accumulates header/status bytes until a full token or line is seen.
    private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();

    private static final OperationStatus GET_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    private static final OperationStatus CREATED_STORED = new CollectionOperationStatus(
            true, "CREATED_STORED", CollectionResponse.CREATED_STORED);
    private static final OperationStatus STORED = new CollectionOperationStatus(
            true, "STORED", CollectionResponse.STORED);
    private static final OperationStatus REPLACED = new CollectionOperationStatus(
            true, "REPLACED", CollectionResponse.REPLACED);
    private static final OperationStatus TRIMMED = new CollectionOperationStatus(
            true, "TRIMMED", CollectionResponse.TRIMMED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus ELEMENT_EXISTS = new CollectionOperationStatus(
            false, "ELEMENT_EXISTS", CollectionResponse.ELEMENT_EXISTS);
    private static final OperationStatus OVERFLOWED = new CollectionOperationStatus(
            false, "OVERFLOWED", CollectionResponse.OVERFLOWED);
    private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus(
            false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus(
            false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH);
    private static final OperationStatus LENGTH_MISMATCH = new CollectionOperationStatus(
            false, "LENGTH_MISMATCH", CollectionResponse.LENGTH_MISMATCH);
    private static final OperationStatus UNDEFINED_OPERATION = new CollectionOperationStatus(
            false, "UNDEFINED_OPERATION", CollectionResponse.UNDEFINED);

    // Status sets accepted on the status line, per sub-command.
    private static final OperationStatus[] INSERT_AND_GET_STATUS_ON_LINE = {
            STORED, CREATED_STORED, NOT_FOUND, ELEMENT_EXISTS, OVERFLOWED,
            OUT_OF_RANGE, TYPE_MISMATCH, LENGTH_MISMATCH, BKEY_MISMATCH };

    private static final OperationStatus[] UPSERT_AND_GET_STATUS_ON_LINE = {
            STORED, CREATED_STORED, REPLACED, NOT_FOUND, OVERFLOWED,
            OUT_OF_RANGE, TYPE_MISMATCH, LENGTH_MISMATCH, BKEY_MISMATCH };

    // Status expected after the returned element data.
    private static final OperationStatus[] STORE_AND_GET_ON_DATA = { TRIMMED };

    protected final String key;
    protected final BTreeStoreAndGet get;
    protected final byte[] dataToStore;

    protected int flags = 0;
    protected int count = 0;
    // Payload buffer for the returned element; null until its header parsed.
    protected byte[] data = null;
    protected int readOffset = 0;
    // '\0' means "reading data"; '\r'/'\n' means "scanning line terminator".
    protected byte lookingFor = '\0';
    protected int spaceCount = 0;

    // null until we have sniffed whether this response carries an eflag token.
    private Boolean hasEFlag = null;

    public BTreeStoreAndGetOperationImpl(String key, BTreeStoreAndGet get,
            byte[] dataToStore, OperationCallback cb) {
        super(cb);
        this.key = key;
        this.get = get;
        this.dataToStore = dataToStore;
    }

    @Override
    public BTreeStoreAndGet getGet() {
        return get;
    }

    @Override
    public void handleLine(String line) {
        if (getLogger().isDebugEnabled()) {
            getLogger().debug("Got line %s", line);
        }

        // VALUE \r\n
        if (line.startsWith("VALUE ")) {
            String[] stuff = line.split(" ");
            assert stuff.length == 3;
            assert "VALUE".equals(stuff[0]);

            flags = Integer.parseInt(stuff[1]);
            count = Integer.parseInt(stuff[2]);

            if (count > 0) {
                // start to read actual data
                setReadType(OperationReadType.DATA);
            }
        } else {
            // Status line: pick the valid status set for this sub-command.
            OperationStatus status = null;
            switch (get.getCmd()) {
            case INSERT:
                status = matchStatus(line, INSERT_AND_GET_STATUS_ON_LINE);
                break;
            case UPSERT:
                status = matchStatus(line, UPSERT_AND_GET_STATUS_ON_LINE);
                break;
            default:
                status = UNDEFINED_OPERATION;
            }
            if (getLogger().isDebugEnabled()) {
                getLogger().debug(status);
            }
            getCallback().receivedStatus(status);
            transitionState(OperationState.COMPLETE);
            return;
        }
    }

    @Override
    public void handleRead(ByteBuffer bb) {
        // Decode a data header.
        if (lookingFor == '\0' && data == null) {
            for (int i = 0; bb.remaining() > 0; i++) {
                byte b = bb.get();
                // Handle spaces to parse the header.
                if (b == ' ') {
                    // One-time check to find if this response has eflags:
                    // an eflag token at the known position starts with "0x".
                    if (hasEFlag == null && spaceCount == BTreeGetByPosition.HEADER_EFLAG_POSITION + 1) {
                        String[] chunk = new String(byteBuffer.toByteArray())
                                .split(" ");
                        if (chunk[BTreeGetByPosition.HEADER_EFLAG_POSITION].startsWith("0x")) {
                            hasEFlag = true;
                        } else {
                            hasEFlag = false;
                        }
                    }

                    spaceCount++;

                    // Parse the value header.
                    // FIXME this is not cool... please fix this :-(
                    int spaceReduced = (hasEFlag != null && hasEFlag) ? 1 : 0;
                    if (get.headerReady(spaceCount - spaceReduced)) {
                        get.decodeItemHeader(new String(byteBuffer.toByteArray()));
                        data = new byte[get.getBytes()];
                        byteBuffer.reset();
                        spaceCount = 0;
                        hasEFlag = null;
                        break;
                    }
                }

                // Ready to finish.
                if (b == '\r') {
                    continue;
                }

                // Finish the operation.
                if (b == '\n') {
                    OperationStatus status = matchStatus(byteBuffer.toString(), STORE_AND_GET_ON_DATA);

                    if (getLogger().isDebugEnabled()) {
                        getLogger().debug("Get complete!");
                    }
                    getCallback().receivedStatus(status);
                    transitionState(OperationState.COMPLETE);
                    data = null;
                    break;
                }

                // Write to the result ByteBuffer
                byteBuffer.write(b);
            }
            return;
        }

        // Read data
        assert key != null;
        assert data != null;
        // This will be the case, because we'll clear them when it's not.
        assert readOffset <= data.length
                : "readOffset is " + readOffset + " data.length is " + data.length;

        if (getLogger().isDebugEnabled()) {
            getLogger().debug("readOffset: %d, length: %d", readOffset, data.length);
        }

        if (lookingFor == '\0') {
            // Copy as much of the element payload as this buffer provides.
            int toRead = data.length - readOffset;
            int available = bb.remaining();
            toRead = Math.min(toRead, available);

            if (getLogger().isDebugEnabled()) {
                getLogger().debug("Reading %d bytes", toRead);
            }

            bb.get(data, readOffset, toRead);
            readOffset += toRead;
        }

        if (lookingFor == '\0' && readOffset == data.length) {
            // put an element data.
            BTreeStoreAndGetOperation.Callback cb = (BTreeStoreAndGetOperation.Callback) getCallback();
            cb.gotData(key, flags, get.getBkeyObject(), get.getElementFlag(), data);

            lookingFor = '\r';
        }

        if (lookingFor != '\0' && bb.hasRemaining()) {
            // Consume the trailing \r\n after the element payload.
            do {
                byte tmp = bb.get();
                assert tmp == lookingFor : "Expecting " + lookingFor + ", got "
                        + (char) tmp;

                switch (lookingFor) {
                case '\r':
                    lookingFor = '\n';
                    break;
                case '\n':
                    lookingFor = '\0';
                    break;
                default:
                    assert false : "Looking for unexpected char: "
                            + (char) lookingFor;
                }
            } while (lookingFor != '\0' && bb.hasRemaining());

            if (lookingFor == '\0') {
                data = null;
                readOffset = 0;
            }
        }
    }

    /**
     * Builds the request: command, key, bkey, eflag (hex), payload length,
     * stringified args, then the payload and CRLF.
     */
    @Override
    public void initialize() {
        String args = get.stringify();
        ByteBuffer bb = ByteBuffer.allocate(dataToStore.length
                + KeyUtil.getKeyBytes(key).length
                + KeyUtil.getKeyBytes(get.getBkeyObject().getBKeyAsString()).length
                + KeyUtil.getKeyBytes(get.getElementFlagByHex()).length
                + args.length()
                + OVERHEAD);
        setArguments(bb, get.getCommand(), key, get.getBkeyObject().getBKeyAsString(),
                get.getElementFlagByHex(), dataToStore.length, args);
        bb.put(dataToStore);
        bb.put(CRLF);
        bb.flip();
        setBuffer(bb);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug(
                    "Request in ascii protocol: "
                            + (new String(bb.array()))
                                    .replace("\r\n", "\\r\\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // Surface cancellation as a CollectionOperationStatus.
        getCallback().receivedStatus(GET_CANCELED);
    }

    @Override
    public Collection getKeys() {
        return Collections.singleton(key);
    }

}
import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.GetsOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Base class for get and gets handlers.
 *
 * Parses one or more "VALUE key flags bytes [cas]" headers followed by raw
 * payload bytes, dispatching each value to either a GetOperation.Callback
 * or a GetsOperation.Callback (the latter when a CAS id is present).
 *
 * NOTE(review): generic type arguments appear stripped in transit
 * (Collection is likely Collection&lt;String&gt; upstream) — confirm before
 * relying on the raw types seen here.
 */
abstract class BaseGetOpImpl extends OperationImpl {

    private static final OperationStatus END = new OperationStatus(true, "END");
    private static final byte[] RN_BYTES = "\r\n".getBytes();
    // "get" or "gets" — decides whether a CAS id is expected.
    private final String cmd;
    private final Collection keys;
    // Parse state for the value currently being read; reset after each value.
    private String currentKey = null;
    private long casValue=0;
    private int currentFlags = 0;
    private byte[] data = null;
    private int readOffset = 0;
    // '\0' means "reading data"; '\r'/'\n' means "scanning line terminator".
    private byte lookingFor = '\0';

    public BaseGetOpImpl(String c,
            OperationCallback cb, Collection k) {
        super(cb);
        cmd=c;
        keys=k;
    }

    /**
     * Get the keys this GetOperation is looking for.
     */
    public final Collection getKeys() {
        return keys;
    }

    /**
     * Handles a response line: "END" completes the operation; a "VALUE"
     * header primes the parse state and switches to DATA read mode.
     */
    @Override
    public final void handleLine(String line) {
        if(line.equals("END")) {
            getLogger().debug("Get complete!");
            getCallback().receivedStatus(END);
            transitionState(OperationState.COMPLETE);
            data=null;
        } else if(line.startsWith("VALUE ")) {
            getLogger().debug("Got line %s", line);
            String[] stuff=line.split(" ");
            assert stuff[0].equals("VALUE");
            currentKey=stuff[1];
            currentFlags=Integer.parseInt(stuff[2]);
            data=new byte[Integer.parseInt(stuff[3])];
            // A 5th token is the CAS id (only present for "gets").
            if(stuff.length > 4) {
                casValue=Long.parseLong(stuff[4]);
            }
            readOffset=0;
            getLogger().debug("Set read type to data");
            setReadType(OperationReadType.DATA);
        } else {
            assert false : "Unknown line type: " + line;
        }
    }

    /**
     * Reads the value payload (possibly across several buffers), delivers it
     * to the callback, then consumes the trailing \r\n and returns to LINE
     * mode for the next VALUE/END line.
     */
    @Override
    public final void handleRead(ByteBuffer b) {
        assert currentKey != null;
        assert data != null;
        // This will be the case, because we'll clear them when it's not.
        assert readOffset <= data.length
            : "readOffset is " + readOffset + " data.length is " + data.length;

        getLogger().debug("readOffset: %d, length: %d",
                readOffset, data.length);
        // If we're not looking for termination, we're still looking for data
        if(lookingFor == '\0') {
            int toRead=data.length - readOffset;
            int available=b.remaining();
            toRead=Math.min(toRead, available);
            getLogger().debug("Reading %d bytes", toRead);
            b.get(data, readOffset, toRead);
            readOffset+=toRead;
        }
        // Transition us into a ``looking for \r\n'' kind of state if we've
        // read enough and are still in a data state.
        if(readOffset == data.length && lookingFor == '\0') {
            // The callback is most likely a get callback. If it's not, then
            // it's a gets callback.
            try {
                GetOperation.Callback gcb=(GetOperation.Callback)getCallback();
                gcb.gotData(currentKey, currentFlags, data);
            } catch(ClassCastException e) {
                GetsOperation.Callback gcb=(GetsOperation.Callback)
                    getCallback();
                gcb.gotData(currentKey, currentFlags, casValue, data);
            }
            lookingFor='\r';
        }
        // If we're looking for an ending byte, let's go find it.
        if(lookingFor != '\0' && b.hasRemaining()) {
            do {
                byte tmp=b.get();
                assert tmp == lookingFor : "Expecting " + lookingFor + ", got "
                    + (char)tmp;
                switch(lookingFor) {
                    case '\r': lookingFor='\n'; break;
                    case '\n': lookingFor='\0'; break;
                    default:
                        assert false: "Looking for unexpected char: "
                            + (char)lookingFor;
                }
            } while(lookingFor != '\0' && b.hasRemaining());
            // Completed the read, reset stuff.
            if(lookingFor == '\0') {
                currentKey=null;
                data=null;
                readOffset=0;
                currentFlags=0;
                getLogger().debug("Setting read type back to line.");
                setReadType(OperationReadType.LINE);
            }
        }
    }

    /**
     * Builds the request: "cmd key1 key2 ...\r\n", sized exactly from the
     * encoded key lengths (6 covers "gets" plus CRLF).
     */
    @Override
    public final void initialize() {
        // Figure out the length of the request
        int size=6; // Enough for gets\r\n
        Collection keyBytes=KeyUtil.getKeyBytes(keys);
        for(byte[] k : keyBytes) {
            size+=k.length;
            size++;
        }
        ByteBuffer b=ByteBuffer.allocate(size);
        b.put(cmd.getBytes());
        for(byte[] k : keyBytes) {
            b.put((byte)' ');
            b.put(k);
        }
        b.put(RN_BYTES);
        b.flip();
        setBuffer(b);
    }

    @Override
    protected final void wasCancelled() {
        getCallback().receivedStatus(CANCELLED);
    }

}
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Base class for ascii store operations (add, set, replace, append, prepend).
 *
 * Builds "type key flags exp bytes\r\ndata\r\n" and expects a single status
 * line ("STORED" on success) in response.
 */
abstract class BaseStoreOperationImpl extends OperationImpl {

    // Slack added to the request buffer for separators, digits, and CRLF.
    private static final int OVERHEAD = 32;
    private static final OperationStatus STORED =
        new OperationStatus(true, "STORED");
    protected final String type;
    protected final String key;
    protected final int flags;
    protected final int exp;
    protected final byte[] data;

    public BaseStoreOperationImpl(String t, String k, int f, int e,
            byte[] d, OperationCallback cb) {
        super(cb);
        type=t;
        key=k;
        flags=f;
        exp=e;
        data=d;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING
            : "Read ``" + line + "'' when in " + getState() + " state";
        getCallback().receivedStatus(matchStatus(line, STORED));
        transitionState(OperationState.COMPLETE);
    }

    @Override
    public void initialize() {
        ByteBuffer bb=ByteBuffer.allocate(data.length
                + KeyUtil.getKeyBytes(key).length + OVERHEAD);
        setArguments(bb, type, key, flags, exp, data.length);
        assert bb.remaining() >= data.length + 2
            : "Not enough room in buffer, need another "
                + (2 + data.length - bb.remaining());
        bb.put(data);
        bb.put(CRLF);
        bb.flip();
        setBuffer(bb);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug("Request in ascii protocol: "
                    + (new String(bb.array())).replace("\r\n", "\\r\\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // XXX: Replace this comment with why I did this
        getCallback().receivedStatus(CANCELLED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public int getFlags() {
        return flags;
    }

    public int getExpiration() {
        return exp;
    }

    public byte[] getData() {
        return data;
    }
}

// ---------------------------------------------------------------------------
// NOTE(review): in the original patch the following class lives in its own
// file, src/main/java/net/spy/memcached/protocol/ascii/CASOperationImpl.java,
// with the same package and these imports in addition to the ones above:
//   net.spy.memcached.CASResponse, net.spy.memcached.ops.CASOperation,
//   net.spy.memcached.ops.CASOperationStatus, net.spy.memcached.ops.StoreType
// ---------------------------------------------------------------------------

/**
 * Ascii "cas" operation: a store that only succeeds when the item's CAS id
 * matches; distinguishes NOT_FOUND from EXISTS on failure.
 */
class CASOperationImpl extends OperationImpl implements CASOperation {

    // Overhead storage stuff to make sure the buffer pushes out far enough.
    // This is "cas" + length(flags) + length(length(data)) + length(cas id)
    // + spaces
    private static final int OVERHEAD = 64;

    private static final OperationStatus STORED=
        new CASOperationStatus(true, "STORED", CASResponse.OK);
    private static final OperationStatus NOT_FOUND=
        new CASOperationStatus(false, "NOT_FOUND", CASResponse.NOT_FOUND);
    private static final OperationStatus EXISTS=
        new CASOperationStatus(false, "EXISTS", CASResponse.EXISTS);

    private final String key;
    private final long casValue;
    private final int flags;
    private final int exp;
    private final byte[] data;

    public CASOperationImpl(String k, long c, int f, int e,
            byte[] d, OperationCallback cb) {
        super(cb);
        key=k;
        casValue=c;
        flags=f;
        exp=e;
        data=d;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING
            : "Read ``" + line + "'' when in " + getState() + " state";
        getCallback().receivedStatus(matchStatus(line,
                STORED, NOT_FOUND, EXISTS));
        transitionState(OperationState.COMPLETE);
    }

    @Override
    public void initialize() {
        // "cas key flags exp bytes casValue\r\ndata\r\n"
        ByteBuffer bb=ByteBuffer.allocate(data.length
                + KeyUtil.getKeyBytes(key).length + OVERHEAD);
        setArguments(bb, "cas", key, flags, exp, data.length, casValue);
        assert bb.remaining() >= data.length + 2
            : "Not enough room in buffer, need another "
                + (2 + data.length - bb.remaining());
        bb.put(data);
        bb.put(CRLF);
        bb.flip();
        setBuffer(bb);
    }

    @Override
    protected void wasCancelled() {
        // XXX: Replace this comment with why I did this
        getCallback().receivedStatus(CANCELLED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public byte[] getBytes() {
        return data;
    }

    public long getCasValue() {
        return casValue;
    }

    public int getExpiration() {
        return exp;
    }

    public int getFlags() {
        return flags;
    }

    public StoreType getStoreType() {
        return StoreType.set;
    }

}
a/src/main/java/net/spy/memcached/protocol/ascii/CollectionBulkStoreOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionBulkStoreOperationImpl.java new file mode 100644 index 000000000..556b90120 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionBulkStoreOperationImpl.java @@ -0,0 +1,135 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import net.spy.memcached.collection.CollectionBulkStore; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.CollectionBulkStoreOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to store collection data in a memcached server. 
+ */ +public class CollectionBulkStoreOperationImpl extends OperationImpl + implements CollectionBulkStoreOperation { + + private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus FAILED_END = new CollectionOperationStatus( + false, "END", CollectionResponse.END); + + private static final OperationStatus CREATED_STORED = new CollectionOperationStatus( + true, "CREATED_STORED", CollectionResponse.CREATED_STORED); + private static final OperationStatus STORED = new CollectionOperationStatus( + true, "STORED", CollectionResponse.STORED); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus ELEMENT_EXISTS = new CollectionOperationStatus( + false, "ELEMENT_EXISTS", CollectionResponse.ELEMENT_EXISTS); + private static final OperationStatus OVERFLOWED = new CollectionOperationStatus( + false, "OVERFLOWED", CollectionResponse.OVERFLOWED); + private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus( + false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus LENGTH_MISMATCH = new CollectionOperationStatus( + false, "LENGTH_MISMATCH", CollectionResponse.LENGTH_MISMATCH); + + protected final String key; + protected final CollectionBulkStore store; + protected final CollectionBulkStoreOperation.Callback cb; + + protected int count; + protected int index = 0; + protected boolean successAll = true; + + public CollectionBulkStoreOperationImpl(List keyList, + CollectionBulkStore store, OperationCallback cb) { + 
super(cb); + this.key = keyList.get(0); + this.store = store; + this.cb = (Callback) cb; + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING + : "Read ``" + line + "'' when in " + getState() + " state"; + if (line.startsWith("END") || store.getItemCount() == 1) { + /* Fix: a single-item pipe returns one bare status line instead of a RESPONSE/END envelope; record that status so a failed store is not reported as END success (mirrors CollectionPipedExistOperationImpl). */ if (store.getItemCount() == 1) { OperationStatus status = matchStatus(line, STORED, CREATED_STORED, NOT_FOUND, ELEMENT_EXISTS, OVERFLOWED, OUT_OF_RANGE, TYPE_MISMATCH, LENGTH_MISMATCH); if (!status.isSuccess()) { cb.gotStatus(index, status); successAll = false; } } cb.receivedStatus((successAll)? END : FAILED_END); + transitionState(OperationState.COMPLETE); + return; + } else if (line.startsWith("RESPONSE ")) { + getLogger().debug("Got line %s", line); + + // TODO server should be fixed + line = line.replace(" ", " "); + line = line.replace(" ", " "); + + String[] stuff = line.split(" "); + assert "RESPONSE".equals(stuff[0]); + count = Integer.parseInt(stuff[1]); + } else { + OperationStatus status = matchStatus(line, STORED, CREATED_STORED, + NOT_FOUND, ELEMENT_EXISTS, OVERFLOWED, OUT_OF_RANGE, + TYPE_MISMATCH, LENGTH_MISMATCH); + + if (!status.isSuccess()) { + cb.gotStatus(index, status); + successAll = false; + } + + index++; + } + } + + @Override + public void initialize() { + ByteBuffer buffer = store.getAsciiCommand(); + setBuffer(buffer); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: \n" + + (new String(buffer.array())).replaceAll("\\r\\n", "\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(STORE_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public CollectionBulkStore getStore() { + return store; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionCountOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionCountOperationImpl.java new file mode 100644 index 000000000..007caf149 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionCountOperationImpl.java @@ -0,0 +1,111 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.CollectionCount; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionCountOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to get exists item count from collection in a memcached server. 
+ */ +public class CollectionCountOperationImpl extends OperationImpl implements + CollectionCountOperation { + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + + protected final String key; + protected final CollectionCount collectionCount; + + protected int count = 0; + + public CollectionCountOperationImpl(String key, + CollectionCount collectionCount, OperationCallback cb) { + super(cb); + this.key = key; + this.collectionCount = collectionCount; + } + + /** + * VALUE \r\n + */ + public void handleLine(String line) { + if (line.startsWith("COUNT=")) { + getLogger().debug("Got line %s", line); + + String[] stuff = line.split("="); + assert "COUNT".equals(stuff[0]); + count = Integer.parseInt(stuff[1]); + + getCallback().receivedStatus( + new CollectionOperationStatus(new OperationStatus(true, + String.valueOf(count)))); + transitionState(OperationState.COMPLETE); + } else { + OperationStatus status = matchStatus(line, NOT_FOUND, TYPE_MISMATCH, BKEY_MISMATCH, UNREADABLE); + getLogger().debug(status); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + return; + } + } + + public void initialize() { + String cmd = collectionCount.getCommand(); + String args = collectionCount.stringify(); + ByteBuffer bb = ByteBuffer.allocate(key.length() + cmd.length() + + args.length() + 
16); + + setArguments(bb, cmd, key, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionCreateOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionCreateOperationImpl.java new file mode 100644 index 000000000..cf1025b57 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionCreateOperationImpl.java @@ -0,0 +1,99 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.collection.CollectionCreate; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionCreateOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to create empty collection in a memcached server. + */ +public class CollectionCreateOperationImpl extends OperationImpl + implements CollectionCreateOperation { + + private static final int OVERHEAD = 32; + + private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus CREATED = new CollectionOperationStatus( + true, "CREATED", CollectionResponse.CREATED); + private static final OperationStatus EXISTS = new CollectionOperationStatus( + true, "EXISTS", CollectionResponse.EXISTS); + private static final OperationStatus SERVER_ERROR = new CollectionOperationStatus( + false, "SERVER_ERROR", CollectionResponse.SERVER_ERROR); + + protected final String key; + protected final CollectionCreate collectionCreate; + + public CollectionCreateOperationImpl(String key, + CollectionCreate collectionCreate, OperationCallback cb) { + super(cb); + this.key = key; + this.collectionCreate = collectionCreate; + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING + : "Read ``" + line + "'' when in " + getState() + " state"; + getCallback().receivedStatus( + matchStatus(line, CREATED, EXISTS, SERVER_ERROR)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + String args = collectionCreate.stringify(); + ByteBuffer bb 
= ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + + args.length() + + OVERHEAD); + setArguments(bb, collectionCreate.getCommand(), key, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replaceAll("\\r\\n", "")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(STORE_CANCELED); + } + + @Override + public Collection getKeys() { + return Collections.singleton(key); + } + + @Override + public CollectionCreate getCreate() { + return collectionCreate; + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionDeleteOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionDeleteOperationImpl.java new file mode 100644 index 000000000..84761ecea --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionDeleteOperationImpl.java @@ -0,0 +1,117 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.CollectionDelete; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionDeleteOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to delete collection data in a memcached server. + */ +public class CollectionDeleteOperationImpl extends OperationImpl + implements CollectionDeleteOperation { + + private static final OperationStatus DELETE_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus DELETED = new CollectionOperationStatus( + true, "DELETED", CollectionResponse.DELETED); + private static final OperationStatus DELETED_DROPPED = new CollectionOperationStatus( + true, "DELETED_DROPPED", CollectionResponse.DELETED_DROPPED); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus( + false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + + protected String key; + protected CollectionDelete collectionDelete; + + public CollectionDeleteOperationImpl(String key, + 
CollectionDelete collectionDelete, OperationCallback cb) { + super(cb); + this.key = key; + this.collectionDelete = collectionDelete; + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING + : "Read ``" + line + "'' when in " + getState() + " state"; + OperationStatus status = matchStatus(line, DELETED, DELETED_DROPPED, + NOT_FOUND, NOT_FOUND_ELEMENT, OUT_OF_RANGE, TYPE_MISMATCH, + BKEY_MISMATCH); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + String cmd = collectionDelete.getCommand(); + String args = collectionDelete.stringify(); + byte[] data = collectionDelete.getData(); + + ByteBuffer bb = ByteBuffer.allocate(key.length() + + cmd.length() + args.length() + data.length + 16); + + setArguments(bb, cmd, key, args); + + if ("sop delete".equals(cmd)) { + bb.put(data); + bb.put(CRLF); + } else if (data.length > 0) { + bb.put(data); + bb.put(CRLF); + } + + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(DELETE_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public CollectionDelete getDelete() { + return collectionDelete; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionExistOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionExistOperationImpl.java new file mode 100644 index 000000000..cae86d095 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionExistOperationImpl.java @@ -0,0 +1,119 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.collection.CollectionExist; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionExistOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to check membership of an item in collection in a memcached server. 
+ */ +public class CollectionExistOperationImpl extends OperationImpl + implements CollectionExistOperation { + + private static final int OVERHEAD = 32; + + private static final OperationStatus EXIST_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus EXIST = new CollectionOperationStatus( + true, "EXIST", CollectionResponse.EXIST); + private static final OperationStatus NOT_EXIST = new CollectionOperationStatus( + true, "NOT_EXIST", CollectionResponse.NOT_EXIST); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + + protected final String key; + protected final String subkey; + protected final CollectionExist collectionExist; + protected final byte[] data; + + public CollectionExistOperationImpl(String key, String subkey, + CollectionExist collectionExist, OperationCallback cb) { + super(cb); + this.key = key; + this.subkey = subkey; + this.collectionExist = collectionExist; + this.data = collectionExist.getData(); + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING + : "Read ``" + line + "'' when in " + getState() + " state"; + getCallback().receivedStatus( + /* fix: NOT_FOUND was listed twice */ matchStatus(line, EXIST, NOT_EXIST, NOT_FOUND, + TYPE_MISMATCH, UNREADABLE)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + String args = collectionExist.stringify(); + ByteBuffer bb = ByteBuffer.allocate(data.length + + KeyUtil.getKeyBytes(key).length + + KeyUtil.getKeyBytes(subkey).length + + args.length() + + OVERHEAD); + setArguments(bb, 
collectionExist.getCommand(), key, subkey, data.length, args); + bb.put(data); + bb.put(CRLF); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replaceAll("\\r\\n", "")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(EXIST_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public String getSubKey() { + return subkey; + } + + public CollectionExist getExist() { + return collectionExist; + } + + public byte[] getData() { + return data; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionGetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionGetOperationImpl.java new file mode 100644 index 000000000..e3583f929 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionGetOperationImpl.java @@ -0,0 +1,236 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.CollectionGet; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionGetOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to retrieve collection data in a memcached server. + */ +public class CollectionGetOperationImpl extends OperationImpl + implements CollectionGetOperation { + + private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream(); + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus DELETED = new CollectionOperationStatus( + true, "DELETED", CollectionResponse.DELETED); + private static final OperationStatus DELETED_DROPPED = new CollectionOperationStatus( + true, "DELETED_DROPPED", CollectionResponse.DELETED_DROPPED); + private static final OperationStatus TRIMMED = new CollectionOperationStatus( + true, "TRIMMED", CollectionResponse.TRIMMED); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus( + false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE); + private static final OperationStatus TYPE_MISMATCH = new 
CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + + protected final String key; + protected final CollectionGet collectionGet; + + protected int flags = 0; + protected int count = 0; + protected byte[] data = null; + protected int readOffset = 0; + protected byte lookingFor = '\0'; + protected int spaceCount = 0; + + public CollectionGetOperationImpl(String key, CollectionGet collectionGet, + OperationCallback cb) { + super(cb); + this.key = key; + this.collectionGet = collectionGet; + } + + /** + * VALUE \r\n + */ + public void handleLine(String line) { + if (line.startsWith("VALUE ")) { + // Response header + getLogger().debug("Got line %s", line); + + String[] stuff = line.split(" "); + assert "VALUE".equals(stuff[0]); + + flags = Integer.parseInt(stuff[1]); + count = Integer.parseInt(stuff[2]); + + setReadType(OperationReadType.DATA); + } else { + OperationStatus status = matchStatus(line, END, TRIMMED, DELETED, + DELETED_DROPPED, NOT_FOUND, NOT_FOUND_ELEMENT, + OUT_OF_RANGE, TYPE_MISMATCH, BKEY_MISMATCH, UNREADABLE); + getLogger().debug(status); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + return; + } + } + + @Override + public final void handleRead(ByteBuffer bb) { + // Decode a collection data header. + if (lookingFor == '\0' && data == null) { + for (int i=0; bb.remaining() > 0; i++) { + byte b = bb.get(); + + // Handle spaces. 
+ if (b == ' ') { + spaceCount++; + if (collectionGet.headerReady(spaceCount)) { + collectionGet.decodeItemHeader(new String(byteBuffer.toByteArray())); + byteBuffer.reset(); + + if (collectionGet.headerReady(spaceCount) + && collectionGet.eachRecordParseCompleted()) { +// if (collectionGet.getElementFlag() != null) { +// collectionGet.setHeaderCount(collectionGet +// .getHeaderCount() - 1); +// } + data = new byte[collectionGet.getDataLength()]; + spaceCount = 0; + break; + } + } + } + + // Ready to finish. + if (b == '\r') { + continue; + } + + // Finish the operation. + if (b == '\n') { + OperationStatus status = matchStatus(byteBuffer.toString(), + END, TRIMMED, DELETED, DELETED_DROPPED, NOT_FOUND, + NOT_FOUND_ELEMENT, OUT_OF_RANGE, TYPE_MISMATCH, + BKEY_MISMATCH, UNREADABLE); + + getLogger().debug("Get complete!"); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + data = null; + break; + } + + byteBuffer.write(b); + } + return; + } + + // Read data + assert key != null; + assert data != null; + // This will be the case, because we'll clear them when it's not. 
+ assert readOffset <= data.length + : "readOffset is " + readOffset + " data.length is " + data.length; + + getLogger().debug("readOffset: %d, length: %d", readOffset, data.length); + + if (lookingFor == '\0') { + int toRead = data.length - readOffset; + int available = bb.remaining(); + toRead = Math.min(toRead, available); + + getLogger().debug("Reading %d bytes", toRead); + + bb.get(data, readOffset, toRead); + readOffset += toRead; + } + + if (lookingFor == '\0' && readOffset == data.length) { + CollectionGetOperation.Callback cb = + (CollectionGetOperation.Callback) getCallback(); + cb.gotData(key, collectionGet.getSubkey(), flags, data); + lookingFor = '\r'; + } + + if (lookingFor != '\0' && bb.hasRemaining()) { + do { + byte tmp = bb.get(); + assert tmp == lookingFor : "Expecting " + lookingFor + ", got " + + (char)tmp; + + switch (lookingFor) { + case '\r': lookingFor = '\n'; break; + case '\n': lookingFor = '\0'; break; + default: + assert false : "Looking for unexpected char: " + + (char)lookingFor; + } + } while (lookingFor != '\0' && bb.hasRemaining()); + + if (lookingFor == '\0') { + data = null; + readOffset = 0; + } + } + } + + public void initialize() { + String cmd = collectionGet.getCommand(); + String args = collectionGet.stringify(); + ByteBuffer bb = ByteBuffer.allocate(key.length() + + cmd.length() + args.length() + 16); + + setArguments(bb, cmd, key, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public CollectionGet getGet() { + return collectionGet; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionMutateOperationImpl.java 
b/src/main/java/net/spy/memcached/protocol/ascii/CollectionMutateOperationImpl.java new file mode 100644 index 000000000..8d293a453 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionMutateOperationImpl.java @@ -0,0 +1,114 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.CollectionMutate; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionMutateOperation; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to incr/decr item value from collection in a memcached server. 
+ */ +public class CollectionMutateOperationImpl extends OperationImpl implements + CollectionMutateOperation { + + private static final OperationStatus GET_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus( + false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus( + false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT); + + protected final String key; + protected final String subkey; + protected final CollectionMutate collectionMutate; + + public CollectionMutateOperationImpl(String key, String subkey, + CollectionMutate collectionMutate, OperationCallback cb) { + super(cb); + this.key = key; + this.subkey = subkey; + this.collectionMutate = collectionMutate; + } + + /** + * \r\n + */ + public void handleLine(String line) { + + OperationStatus status = null; + + try { + Long.valueOf(line); + getCallback().receivedStatus(new OperationStatus(true, line)); + } catch (NumberFormatException e) { + status = matchStatus(line, NOT_FOUND, TYPE_MISMATCH, BKEY_MISMATCH, + UNREADABLE, NOT_FOUND_ELEMENT); + + getLogger().debug(status); + getCallback().receivedStatus(status); + } + + transitionState(OperationState.COMPLETE); + } + + public void initialize() { + String cmd = collectionMutate.getCommand(); + String args = collectionMutate.stringify(); + ByteBuffer bb = ByteBuffer.allocate(key.length() + subkey.length() + + 
cmd.length() + args.length() + 16); + + setArguments(bb, cmd, key, subkey, args); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: " + + (new String(bb.array())) + .replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(GET_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public String getSubKey() { + return subkey; + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedExistOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedExistOperationImpl.java new file mode 100644 index 000000000..8e2b7935f --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedExistOperationImpl.java @@ -0,0 +1,133 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.SetPipedExist; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.CollectionPipedExistOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +public class CollectionPipedExistOperationImpl extends OperationImpl implements + CollectionPipedExistOperation { + + private static final OperationStatus EXIST_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus EXIST = new CollectionOperationStatus( + true, "EXIST", CollectionResponse.EXIST); + private static final OperationStatus NOT_EXIST = new CollectionOperationStatus( + true, "NOT_EXIST", CollectionResponse.NOT_EXIST); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus FAILED_END = new CollectionOperationStatus( + false, "END", CollectionResponse.END); + + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus( + false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH); + private static final OperationStatus UNREADABLE = new CollectionOperationStatus( + false, "UNREADABLE", CollectionResponse.UNREADABLE); + + protected final String key; + protected final SetPipedExist setPipedExist; + protected final CollectionPipedExistOperation.Callback cb; + + protected int count; + protected int index = 0; + protected boolean successAll = true; + + public CollectionPipedExistOperationImpl(String key, + 
SetPipedExist collectionExist, OperationCallback cb) { + super(cb); + this.key = key; + this.setPipedExist = collectionExist; + this.cb = (Callback) cb; + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING : "Read ``" + line + + "'' when in " + getState() + " state"; + + if (line.startsWith("END") || setPipedExist.getItemCount() == 1) { + if (setPipedExist.getItemCount() == 1) { + OperationStatus status = matchStatus(line, EXIST, NOT_EXIST, + NOT_FOUND, TYPE_MISMATCH, UNREADABLE); + cb.gotStatus(index, status); + } + cb.receivedStatus((successAll) ? END : FAILED_END); + transitionState(OperationState.COMPLETE); + return; + } else if (line.startsWith("RESPONSE ")) { + getLogger().debug("Got line %s", line); + + // TODO server should be fixed + line = line.replace(" ", " "); + line = line.replace(" ", " "); + + String[] stuff = line.split(" "); + assert "RESPONSE".equals(stuff[0]); + count = Integer.parseInt(stuff[1]); + } else { + OperationStatus status = matchStatus(line, EXIST, NOT_EXIST, + NOT_FOUND, TYPE_MISMATCH, UNREADABLE); + + if (!status.isSuccess()) { + successAll = false; + } + + cb.gotStatus(index, status); + + index++; + } + } + + @Override + public void initialize() { + ByteBuffer buffer = setPipedExist.getAsciiCommand(); + setBuffer(buffer); + + if (getLogger().isDebugEnabled()) { + getLogger().debug( + "Request in ascii protocol: \n" + + (new String(buffer.array())).replaceAll("\\r\\n", + "\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(EXIST_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public SetPipedExist getExist() { + return setPipedExist; + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedStoreOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/CollectionPipedStoreOperationImpl.java new file mode 100644 index 000000000..1b95f6c82 --- /dev/null +++ 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.collection.CollectionPipedStore;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionPipedStoreOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to store collection data in a memcached server.
 *
 * Sends one pipelined request holding many element stores and reads back
 * one status line per element ("RESPONSE &lt;count&gt;" ... "END" framing).
 */
public class CollectionPipedStoreOperationImpl extends OperationImpl
        implements CollectionPipedStoreOperation {

    /** Status delivered when the operation is cancelled before completion. */
    private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    // Overall result: END (success) when every element stored, FAILED_END
    // when at least one element failed.
    private static final OperationStatus END = new CollectionOperationStatus(
            true, "END", CollectionResponse.END);
    private static final OperationStatus FAILED_END = new CollectionOperationStatus(
            false, "END", CollectionResponse.END);

    // Per-element responses from the server.
    private static final OperationStatus CREATED_STORED = new CollectionOperationStatus(
            true, "CREATED_STORED", CollectionResponse.CREATED_STORED);
    private static final OperationStatus STORED = new CollectionOperationStatus(
            true, "STORED", CollectionResponse.STORED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus ELEMENT_EXISTS = new CollectionOperationStatus(
            false, "ELEMENT_EXISTS", CollectionResponse.ELEMENT_EXISTS);
    private static final OperationStatus OVERFLOWED = new CollectionOperationStatus(
            false, "OVERFLOWED", CollectionResponse.OVERFLOWED);
    private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus(
            false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus LENGTH_MISMATCH = new CollectionOperationStatus(
            false, "LENGTH_MISMATCH", CollectionResponse.LENGTH_MISMATCH);

    protected final String key;                 // key of the target collection
    protected final CollectionPipedStore store; // pre-encoded pipelined request
    protected final CollectionPipedStoreOperation.Callback cb;

    protected int count;                 // element count from the RESPONSE header
    protected int index = 0;             // index of the element status being read
    protected boolean successAll = true; // cleared once any element fails

    /**
     * @param key   collection key
     * @param store pre-built pipelined store request
     * @param cb    must be a {@link CollectionPipedStoreOperation.Callback}
     */
    public CollectionPipedStoreOperationImpl(String key,
            CollectionPipedStore store, OperationCallback cb) {
        super(cb);
        this.key = key;
        this.store = store;
        this.cb = (Callback) cb;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING
                : "Read ``" + line + "'' when in " + getState() + " state";
        // "END" terminates a multi-element pipe; a single-element pipe,
        // presumably sent without RESPONSE/END framing, completes on its
        // first line.
        // NOTE(review): when getItemCount() == 1 the element's own status
        // line is never parsed, so a failed single store still reports a
        // successful END — compare CollectionPipedExistOperationImpl; verify
        // this is intended.
        if (line.startsWith("END") || store.getItemCount() == 1) {
            cb.receivedStatus((successAll)? END : FAILED_END);
            transitionState(OperationState.COMPLETE);
            return;
        } else if (line.startsWith("RESPONSE ")) {
            getLogger().debug("Got line %s", line);

            // TODO server should be fixed
            // NOTE(review): as written these two replaces are no-ops
            // (single space -> single space); the upstream intent appears to
            // be collapsing doubled spaces ("  " -> " ") before split(" ") —
            // likely garbled in transit, verify against the original source.
            line = line.replace(" ", " ");
            line = line.replace(" ", " ");

            String[] stuff = line.split(" ");
            assert "RESPONSE".equals(stuff[0]);
            count = Integer.parseInt(stuff[1]);
        } else {
            // One status line per element; only failures are reported back
            // to the callback by element index.
            OperationStatus status = matchStatus(line, STORED, CREATED_STORED,
                    NOT_FOUND, ELEMENT_EXISTS, OVERFLOWED, OUT_OF_RANGE,
                    TYPE_MISMATCH, LENGTH_MISMATCH);

            if (!status.isSuccess()) {
                cb.gotStatus(index, status);
                successAll = false;
            }

            index++;
        }
    }

    @Override
    public void initialize() {
        // The wire buffer is pre-built by CollectionPipedStore.
        ByteBuffer buffer = store.getAsciiCommand();
        setBuffer(buffer);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug("Request in ascii protocol: \n"
                    + (new String(buffer.array())).replaceAll("\\r\\n", "\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // Deliver a terminal status so waiting callers do not hang.
        getCallback().receivedStatus(STORE_CANCELED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public CollectionPipedStore getStore() {
        return store;
    }

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.collection.CollectionPipedUpdate;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionPipedUpdateOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to update collection data in a memcached server.
 *
 * Sends one pipelined request holding many element updates and reads back
 * one status line per element ("RESPONSE &lt;count&gt;" ... "END" framing).
 */
public class CollectionPipedUpdateOperationImpl extends OperationImpl implements
        CollectionPipedUpdateOperation {

    /** Status delivered when the operation is cancelled before completion. */
    private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    // Overall result: END (success) when every element updated, FAILED_END
    // when at least one element failed.
    private static final OperationStatus END = new CollectionOperationStatus(
            true, "END", CollectionResponse.END);
    private static final OperationStatus FAILED_END = new CollectionOperationStatus(
            false, "END", CollectionResponse.END);

    // Per-element responses from the server.
    private static final OperationStatus UPDATED = new CollectionOperationStatus(
            true, "UPDATED", CollectionResponse.UPDATED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus(
            false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT);
    private static final OperationStatus NOTHING_TO_UPDATE = new CollectionOperationStatus(
            false, "NOTHING_TO_UPDATE", CollectionResponse.NOTHING_TO_UPDATE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus(
            false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH);
    private static final OperationStatus EFLAG_MISMATCH = new CollectionOperationStatus(
            false, "EFLAG_MISMATCH", CollectionResponse.EFLAG_MISMATCH);
    private static final OperationStatus SERVER_ERROR = new CollectionOperationStatus(
            false, "SERVER_ERROR", CollectionResponse.SERVER_ERROR);

    protected final String key;                   // key of the target collection
    protected final CollectionPipedUpdate update; // pre-encoded pipelined request
    protected final CollectionPipedUpdateOperation.Callback cb;

    protected int count;                 // element count from the RESPONSE header
    protected int index = 0;             // index of the element status being read
    protected boolean successAll = true; // cleared once any element fails

    /**
     * @param key    collection key
     * @param update pre-built pipelined update request
     * @param cb     must be a {@link CollectionPipedUpdateOperation.Callback}
     */
    public CollectionPipedUpdateOperationImpl(String key,
            CollectionPipedUpdate update, OperationCallback cb) {
        super(cb);
        this.key = key;
        this.update = update;
        this.cb = (Callback) cb;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING : "Read ``" + line
                + "'' when in " + getState() + " state";
        // "END" terminates a multi-element pipe; a single-element pipe,
        // presumably sent without RESPONSE/END framing, completes on its
        // first line.
        // NOTE(review): when getItemCount() == 1 the element's own status
        // line is never parsed, so a failed single update still reports a
        // successful END — verify this is intended.
        if (line.startsWith("END") || update.getItemCount() == 1) {
            cb.receivedStatus((successAll) ? END : FAILED_END);
            transitionState(OperationState.COMPLETE);
            return;
        } else if (line.startsWith("RESPONSE ")) {
            getLogger().debug("Got line %s", line);

            // TODO server should be fixed
            // NOTE(review): as written these two replaces are no-ops
            // (single space -> single space); the upstream intent appears to
            // be collapsing doubled spaces ("  " -> " ") before split(" ") —
            // likely garbled in transit, verify against the original source.
            line = line.replace(" ", " ");
            line = line.replace(" ", " ");

            String[] stuff = line.split(" ");
            assert "RESPONSE".equals(stuff[0]);
            count = Integer.parseInt(stuff[1]);
        } else {
            // One status line per element; only failures are reported back
            // to the callback by element index.
            OperationStatus status = matchStatus(line, UPDATED, NOT_FOUND,
                    NOT_FOUND_ELEMENT, NOTHING_TO_UPDATE, TYPE_MISMATCH,
                    BKEY_MISMATCH, EFLAG_MISMATCH, SERVER_ERROR);

            if (!status.isSuccess()) {
                cb.gotStatus(index, status);
                successAll = false;
            }

            index++;
        }
    }

    @Override
    public void initialize() {
        // The wire buffer is pre-built by CollectionPipedUpdate.
        ByteBuffer buffer = update.getAsciiCommand();
        setBuffer(buffer);

        if (getLogger().isDebugEnabled()) {
            getLogger().debug(
                    "Request in ascii protocol: \n"
                            + (new String(buffer.array())).replaceAll("\\r\\n",
                                    "\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // Deliver a terminal status so waiting callers do not hang.
        getCallback().receivedStatus(STORE_CANCELED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public CollectionPipedUpdate getUpdate() {
        return update;
    }

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright
2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionStore;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionStoreOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to store collection data in a memcached server.
 *
 * Stores a single element (identified by the collection key plus a subkey,
 * e.g. a b+tree bkey) and completes on the single-line server response.
 */
public class CollectionStoreOperationImpl extends OperationImpl
        implements CollectionStoreOperation {

    // Extra bytes reserved in the request buffer for the command name,
    // separators, the data-length field and the trailing CRLFs.
    private static final int OVERHEAD = 32;

    /** Status delivered when the operation is cancelled before completion. */
    private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    // Possible single-line responses from the server.
    private static final OperationStatus CREATED_STORED = new CollectionOperationStatus(
            true, "CREATED_STORED", CollectionResponse.CREATED_STORED);
    private static final OperationStatus STORED = new CollectionOperationStatus(
            true, "STORED", CollectionResponse.STORED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus ELEMENT_EXISTS = new CollectionOperationStatus(
            false, "ELEMENT_EXISTS", CollectionResponse.ELEMENT_EXISTS);
    private static final OperationStatus OVERFLOWED = new CollectionOperationStatus(
            false, "OVERFLOWED", CollectionResponse.OVERFLOWED);
    private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus(
            false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus(
            false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH);
    private static final OperationStatus LENGTH_MISMATCH = new CollectionOperationStatus(
            false, "LENGTH_MISMATCH", CollectionResponse.LENGTH_MISMATCH);

    protected final String key;
    protected final String subkey; // e.g.) 0 or 0x00
    protected final CollectionStore collectionStore; // command parameters
    protected final byte[] data; // element payload, appended after the header

    /**
     * @param key             collection key
     * @param subkey          element identifier within the collection
     * @param collectionStore command parameters (flags, attributes, ...)
     * @param data            element payload bytes
     * @param cb              completion callback
     */
    public CollectionStoreOperationImpl(String key, String subkey,
            CollectionStore collectionStore, byte[] data, OperationCallback cb) {
        super(cb);
        this.key = key;
        this.subkey = subkey;
        this.collectionStore = collectionStore;
        this.data = data;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING
                : "Read ``" + line + "'' when in " + getState() + " state";
        // Single-line response: map it to a status and finish.
        getCallback().receivedStatus(
                matchStatus(line, STORED, CREATED_STORED, NOT_FOUND, ELEMENT_EXISTS,
                        OVERFLOWED, OUT_OF_RANGE, TYPE_MISMATCH, LENGTH_MISMATCH, BKEY_MISMATCH));
        transitionState(OperationState.COMPLETE);
    }

    @Override
    public void initialize() {
        // Build "<command> <key> <subkey> <eflag> <length> <args>\r\n<data>\r\n".
        String args = collectionStore.stringify();
        ByteBuffer bb = ByteBuffer.allocate(data.length
                + KeyUtil.getKeyBytes(key).length
                + KeyUtil.getKeyBytes(subkey).length
                + KeyUtil.getKeyBytes(collectionStore.getElementFlagByHex()).length
                + args.length()
                + OVERHEAD);
        setArguments(bb, collectionStore.getCommand(), key, subkey,
                collectionStore.getElementFlagByHex(), data.length, args);
        bb.put(data);
        bb.put(CRLF);
        bb.flip();
        setBuffer(bb);

        if (getLogger().isDebugEnabled()) {
            // Escape CRLF so the request logs on a single line.
            getLogger().debug("Request in ascii protocol: "
                    + (new String(bb.array())).replace("\r\n", "\\r\\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // Deliver a terminal status so waiting callers do not hang.
        getCallback().receivedStatus(STORE_CANCELED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public String getSubKey() {
        return subkey;
    }

    public CollectionStore getStore() {
        return collectionStore;
    }

    public byte[] getData() {
        return data;
    }

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionUpdate;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionUpdateOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to update collection data in a memcached server.
 *
 * Updates a single element (value and/or element flag) identified by the
 * collection key plus a subkey; completes on the single-line response.
 */
public class CollectionUpdateOperationImpl extends OperationImpl implements
        CollectionUpdateOperation {

    // Extra bytes reserved in the request buffer for the command name,
    // separators, the data-length field and the trailing CRLFs.
    private static final int OVERHEAD = 32;

    /** Status delivered when the operation is cancelled before completion. */
    private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    // Possible single-line responses from the server.
    private static final OperationStatus UPDATED = new CollectionOperationStatus(
            true, "UPDATED", CollectionResponse.UPDATED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus(
            false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT);
    private static final OperationStatus NOTHING_TO_UPDATE = new CollectionOperationStatus(
            false, "NOTHING_TO_UPDATE", CollectionResponse.NOTHING_TO_UPDATE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus(
            false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH);
    private static final OperationStatus EFLAG_MISMATCH = new CollectionOperationStatus(
            false, "EFLAG_MISMATCH", CollectionResponse.EFLAG_MISMATCH);
    private static final OperationStatus SERVER_ERROR = new CollectionOperationStatus(
            false, "SERVER_ERROR", CollectionResponse.SERVER_ERROR);

    protected final String key;
    protected final String subkey; // e.g.) 0 or 0x00
    protected final CollectionUpdate collectionUpdate; // command parameters
    protected final byte[] data; // new payload; null means flag-only update

    /**
     * @param key              collection key
     * @param subkey           element identifier within the collection
     * @param collectionUpdate command parameters (eflag update, ...)
     * @param data             new payload, or null when only flags change
     * @param cb               completion callback
     */
    public CollectionUpdateOperationImpl(String key, String subkey,
            CollectionUpdate collectionUpdate, byte[] data,
            OperationCallback cb) {
        super(cb);
        this.key = key;
        this.subkey = subkey;
        this.collectionUpdate = collectionUpdate;
        this.data = data;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING : "Read ``" + line
                + "'' when in " + getState() + " state";
        // Single-line response: map it to a status and finish.
        getCallback().receivedStatus(
                matchStatus(line, UPDATED, NOT_FOUND, NOT_FOUND_ELEMENT,
                        NOTHING_TO_UPDATE, TYPE_MISMATCH, BKEY_MISMATCH,
                        EFLAG_MISMATCH, SERVER_ERROR));
        transitionState(OperationState.COMPLETE);
    }

    @Override
    public void initialize() {
        String args = collectionUpdate.stringify();

        ByteBuffer bb = ByteBuffer
                .allocate(((data != null) ? data.length : 0)
                        + KeyUtil.getKeyBytes(key).length
                        + KeyUtil.getKeyBytes(subkey).length
                        + KeyUtil.getKeyBytes(collectionUpdate
                                .getElementFlagByHex()).length + args.length()
                        + OVERHEAD);

        // NOTE(review): mixed-type ternary — data.length (boxed int) vs the
        // string "-1"; both end up as Object varargs. "-1" presumably tells
        // the server "no data, flag-only update" — confirm against the
        // protocol spec.
        setArguments(bb, collectionUpdate.getCommand(), key, subkey, args,
                ((data != null) ? data.length : "-1"));

        // Payload (and its CRLF terminator) is sent only when data exists.
        if (data != null) {
            bb.put(data);
            bb.put(CRLF);
        }
        bb.flip();
        setBuffer(bb);

        if (getLogger().isDebugEnabled()) {
            // NOTE(review): replaceAll("\\r\\n", "\r\n") replaces CRLF with
            // CRLF — effectively a no-op; probably meant to escape CRLF for
            // display like the sibling store operation. Verify.
            getLogger().debug(
                    "Request in ascii protocol: '"
                            + (new String(bb.array())).replaceAll("\\r\\n",
                                    "\r\n") + "'");
        }
    }

    @Override
    protected void wasCancelled() {
        // Deliver a terminal status so waiting callers do not hang.
        getCallback().receivedStatus(STORE_CANCELED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public String getSubKey() {
        return subkey;
    }

    public CollectionUpdate getUpdate() {
        return collectionUpdate;
    }

    public byte[] getData() {
        return data;
    }

}
 */
package net.spy.memcached.protocol.ascii;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionStore;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.CollectionStoreOperation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to store-or-replace (upsert) collection data in a memcached
 * server: like CollectionStoreOperationImpl but the server may also answer
 * REPLACED when an existing element was overwritten.
 */
public class CollectionUpsertOperationImpl extends OperationImpl implements
        CollectionStoreOperation {

    // Extra bytes reserved in the request buffer for the command name,
    // separators, the data-length field and the trailing CRLFs.
    private static final int OVERHEAD = 32;

    /** Status delivered when the operation is cancelled before completion. */
    private static final OperationStatus STORE_CANCELED = new CollectionOperationStatus(
            false, "collection canceled", CollectionResponse.CANCELED);

    // Possible single-line responses from the server.
    private static final OperationStatus CREATED_STORED = new CollectionOperationStatus(
            true, "CREATED_STORED", CollectionResponse.CREATED_STORED);
    private static final OperationStatus STORED = new CollectionOperationStatus(
            true, "STORED", CollectionResponse.STORED);
    private static final OperationStatus REPLACED = new CollectionOperationStatus(
            true, "REPLACED", CollectionResponse.REPLACED);
    private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
            false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
    private static final OperationStatus ELEMENT_EXISTS = new CollectionOperationStatus(
            false, "ELEMENT_EXISTS", CollectionResponse.ELEMENT_EXISTS);
    private static final OperationStatus OVERFLOWED = new CollectionOperationStatus(
            false, "OVERFLOWED", CollectionResponse.OVERFLOWED);
    private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus(
            false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE);
    private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
            false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
    private static final OperationStatus LENGTH_MISMATCH = new CollectionOperationStatus(
            false, "LENGTH_MISMATCH", CollectionResponse.LENGTH_MISMATCH);

    protected final String key;
    protected final String subkey; // element identifier within the collection
    protected final CollectionStore collectionStore; // command parameters
    protected final byte[] data; // element payload

    /**
     * @param key             collection key
     * @param subkey          element identifier within the collection
     * @param collectionStore command parameters (flags, attributes, ...)
     * @param data            element payload bytes
     * @param cb              completion callback
     */
    public CollectionUpsertOperationImpl(String key, String subkey,
            CollectionStore collectionStore, byte[] data,
            OperationCallback cb) {
        super(cb);
        this.key = key;
        this.subkey = subkey;
        this.collectionStore = collectionStore;
        this.data = data;
    }

    @Override
    public void handleLine(String line) {
        assert getState() == OperationState.READING : "Read ``" + line
                + "'' when in " + getState() + " state";
        // Single-line response: map it to a status and finish.
        getCallback().receivedStatus(
                matchStatus(line, STORED, REPLACED, CREATED_STORED, NOT_FOUND,
                        ELEMENT_EXISTS, OVERFLOWED, OUT_OF_RANGE,
                        TYPE_MISMATCH, LENGTH_MISMATCH));
        transitionState(OperationState.COMPLETE);
    }

    @Override
    public void initialize() {
        // Build "<command> <key> <subkey> <eflag> <length> <args>\r\n<data>\r\n".
        String args = collectionStore.stringify();
        ByteBuffer bb = ByteBuffer
                .allocate(data.length
                        + KeyUtil.getKeyBytes(key).length
                        + KeyUtil.getKeyBytes(subkey).length
                        + KeyUtil.getKeyBytes(collectionStore.getElementFlagByHex()).length
                        + args.length()
                        + OVERHEAD);
        setArguments(bb, collectionStore.getCommand(), key, subkey,
                collectionStore.getElementFlagByHex(), data.length, args);
        bb.put(data);
        bb.put(CRLF);
        bb.flip();
        setBuffer(bb);

        if (getLogger().isDebugEnabled()) {
            // NOTE(review): replaceAll("\\r\\n", "\r\n") replaces CRLF with
            // CRLF — effectively a no-op; probably meant to escape CRLF for
            // display like the sibling store operation. Verify.
            getLogger().debug(
                    "Request in ascii protocol: "
                            + (new String(bb.array())).replaceAll("\\r\\n",
                                    "\r\n"));
        }
    }

    @Override
    protected void wasCancelled() {
        // Deliver a terminal status so waiting callers do not hang.
        getCallback().receivedStatus(STORE_CANCELED);
    }

    public Collection getKeys() {
        return Collections.singleton(key);
    }

    public String getSubKey() {
        return subkey;
    }

    public CollectionStore getStore() {
        return collectionStore;
    }

    public byte[] getData() {
return data; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/ConcatenationOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/ConcatenationOperationImpl.java new file mode 100644 index 000000000..326a01ba7 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/ConcatenationOperationImpl.java @@ -0,0 +1,30 @@ +package net.spy.memcached.protocol.ascii; + +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.OperationCallback; + +/** + * Operation for ascii concatenations. + */ +public class ConcatenationOperationImpl extends BaseStoreOperationImpl + implements ConcatenationOperation { + + private final ConcatenationType concatType; + + public ConcatenationOperationImpl(ConcatenationType t, String k, + byte[] d, OperationCallback cb) { + super(t.name(), k, 0, 0, d, cb); + concatType = t; + } + + public long getCasValue() { + // ASCII cat ops don't have CAS. + return 0; + } + + public ConcatenationType getStoreType() { + return concatType; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/DeleteOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/DeleteOperationImpl.java new file mode 100644 index 000000000..8ff406d2f --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/DeleteOperationImpl.java @@ -0,0 +1,55 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation to delete an item from the cache. 
+ */ +final class DeleteOperationImpl extends OperationImpl + implements DeleteOperation { + + private static final int OVERHEAD=32; + + private static final OperationStatus DELETED= + new OperationStatus(true, "DELETED"); + private static final OperationStatus NOT_FOUND= + new OperationStatus(false, "NOT_FOUND"); + + private final String key; + + public DeleteOperationImpl(String k, OperationCallback cb) { + super(cb); + key=k; + } + + @Override + public void handleLine(String line) { + getLogger().debug("Delete of %s returned %s", key, line); + getCallback().receivedStatus(matchStatus(line, DELETED, NOT_FOUND)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + ByteBuffer b=ByteBuffer.allocate( + KeyUtil.getKeyBytes(key).length + OVERHEAD); + setArguments(b, "delete", key); + b.flip(); + setBuffer(b); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/ExtendedBTreeGetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/ExtendedBTreeGetOperationImpl.java new file mode 100644 index 000000000..7318f3c7c --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/ExtendedBTreeGetOperationImpl.java @@ -0,0 +1,238 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
package net.spy.memcached.protocol.ascii;

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;

import net.spy.memcached.collection.ExtendedBTreeGet;
import net.spy.memcached.collection.CollectionGet;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.ops.CollectionGetOperation;
import net.spy.memcached.ops.ExtendedBTreeGetOperation;
import net.spy.memcached.ops.CollectionOperationStatus;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;

/**
 * Operation to retrieve collection data in a memcached server.
 *
 * Parses the ascii response incrementally: element headers are decoded
 * byte-by-byte in {@link #handleRead(ByteBuffer)} (the server may deliver a
 * header split across network reads), then the element payload of the
 * announced length is read, then the trailing CRLF is consumed.
 */
public class ExtendedBTreeGetOperationImpl extends OperationImpl
	implements CollectionGetOperation {

	// Accumulates header bytes until a full token/line is available.
	private final ByteArrayOutputStream byteBuffer = new ByteArrayOutputStream();

	private static final OperationStatus GET_CANCELED = new CollectionOperationStatus(
			false, "collection canceled", CollectionResponse.CANCELED);

	private static final OperationStatus END = new CollectionOperationStatus(
			true, "END", CollectionResponse.END);
	private static final OperationStatus TRIMMED = new CollectionOperationStatus(
			true, "TRIMMED", CollectionResponse.TRIMMED);
	private static final OperationStatus DELETED = new CollectionOperationStatus(
			true, "DELETED", CollectionResponse.DELETED);
	private static final OperationStatus DELETED_DROPPED = new CollectionOperationStatus(
			true, "DELETED_DROPPED", CollectionResponse.DELETED_DROPPED);
	private static final OperationStatus NOT_FOUND = new CollectionOperationStatus(
			false, "NOT_FOUND", CollectionResponse.NOT_FOUND);
	private static final OperationStatus NOT_FOUND_ELEMENT = new CollectionOperationStatus(
			false, "NOT_FOUND_ELEMENT", CollectionResponse.NOT_FOUND_ELEMENT);
	private static final OperationStatus OUT_OF_RANGE = new CollectionOperationStatus(
			false, "OUT_OF_RANGE", CollectionResponse.OUT_OF_RANGE);
	private static final OperationStatus TYPE_MISMATCH = new CollectionOperationStatus(
			false, "TYPE_MISMATCH", CollectionResponse.TYPE_MISMATCH);
	private static final OperationStatus BKEY_MISMATCH = new CollectionOperationStatus(
			false, "BKEY_MISMATCH", CollectionResponse.BKEY_MISMATCH);
	private static final OperationStatus UNREADABLE = new CollectionOperationStatus(
			false, "UNREADABLE", CollectionResponse.UNREADABLE);

	protected final String key;
	protected final CollectionGet collectionGet;

	// Parser state, reset between elements.
	protected int flags = 0;          // item flags from the VALUE line
	protected int count = 0;          // element count from the VALUE line
	protected byte[] data = null;     // payload of the element currently being read
	protected int readOffset = 0;     // how much of `data` has been filled
	protected byte lookingFor = '\0'; // '\r'/'\n' while consuming the element's trailing CRLF
	protected int spaceCount = 0;     // spaces seen so far inside the current element header

	public ExtendedBTreeGetOperationImpl(String key, CollectionGet collectionGet,
			OperationCallback cb) {
		super(cb);
		this.key = key;
		this.collectionGet = collectionGet;
	}

	/**
	 * VALUE &lt;flag&gt; &lt;count&gt;\r\n
	 * Any other line is a terminal status (END, NOT_FOUND, ...).
	 */
	public void handleLine(String line) {
		if (line.startsWith("VALUE ")) {
			// Response header
			getLogger().debug("Got line %s", line);

			String[] stuff = line.split(" ");
			assert "VALUE".equals(stuff[0]);

			flags = Integer.parseInt(stuff[1]);
			count = Integer.parseInt(stuff[2]);

			// Element headers/payloads follow; switch to raw-data mode.
			setReadType(OperationReadType.DATA);
		} else {
			OperationStatus status = matchStatus(line, END, TRIMMED, DELETED,
					DELETED_DROPPED, NOT_FOUND, NOT_FOUND_ELEMENT,
					OUT_OF_RANGE, TYPE_MISMATCH, BKEY_MISMATCH, UNREADABLE);
			getLogger().debug(status);
			getCallback().receivedStatus(status);
			transitionState(OperationState.COMPLETE);
			return;
		}
	}

	@Override
	public final void handleRead(ByteBuffer bb) {
		// Decode a collection data header.
		// (data == null && lookingFor == '\0' means we are between elements.)
		if (lookingFor == '\0' && data == null) {
			for (int i=0; bb.remaining() > 0; i++) {
				byte b = bb.get();

				// Handle spaces.  Each space ends a header token; once
				// collectionGet reports the header complete, the element's
				// data length is known and we switch to payload reading.
				// NOTE(review): a space that does not complete the header
				// falls through and is written into byteBuffer below —
				// decodeItemHeader appears to receive multi-token strings;
				// confirm against CollectionGet.decodeItemHeader.
				if (b == ' ') {
					spaceCount++;
					if (collectionGet.headerReady(spaceCount)) {
						collectionGet.decodeItemHeader(new String(byteBuffer.toByteArray()));
						byteBuffer.reset();

						if (collectionGet.headerReady(spaceCount)
								&& collectionGet.eachRecordParseCompleted()) {
//							if (collectionGet.getElementFlag() != null) {
//								collectionGet.setHeaderCount(collectionGet
//										.getHeaderCount() - 1);
//							}
							data = new byte[collectionGet.getDataLength()];
							spaceCount = 0;
							break;
						}
					}
				}

				// Ready to finish.
				if (b == '\r') {
					continue;
				}

				// Finish the operation: a bare line here (no VALUE header)
				// is the terminal status of the whole response.
				if (b == '\n') {
					OperationStatus status = matchStatus(byteBuffer.toString(),
							END, TRIMMED, DELETED, DELETED_DROPPED, NOT_FOUND,
							NOT_FOUND_ELEMENT, OUT_OF_RANGE, TYPE_MISMATCH,
							BKEY_MISMATCH, UNREADABLE);

					getLogger().debug("Get complete!");
					getCallback().receivedStatus(status);
					transitionState(OperationState.COMPLETE);
					data = null;
					break;
				}

				byteBuffer.write(b);
			}
			return;
		}

		// Read data
		assert key != null;
		assert data != null;
		// This will be the case, because we'll clear them when it's not.
		assert readOffset <= data.length
			: "readOffset is " + readOffset + " data.length is " + data.length;

		getLogger().debug("readOffset: %d, length: %d", readOffset, data.length);

		// Copy as much of the element payload as this buffer holds.
		if (lookingFor == '\0') {
			int toRead = data.length - readOffset;
			int available = bb.remaining();
			toRead = Math.min(toRead, available);

			getLogger().debug("Reading %d bytes", toRead);

			bb.get(data, readOffset, toRead);
			readOffset += toRead;
		}

		// Payload complete: hand the element to the callback, then expect CRLF.
		if (lookingFor == '\0' && readOffset == data.length) {
			ExtendedBTreeGetOperation.Callback cb =
				(ExtendedBTreeGetOperation.Callback) getCallback();
			cb.gotData(key, ((ExtendedBTreeGet) collectionGet).getLongSubkey(),
					collectionGet.getElementFlag(), flags, data);
			lookingFor = '\r';
		}

		// Consume the element's trailing "\r\n", possibly split across reads.
		if (lookingFor != '\0' && bb.hasRemaining()) {
			do {
				byte tmp = bb.get();
				assert tmp == lookingFor : "Expecting " + lookingFor + ", got "
					+ (char)tmp;

				switch (lookingFor) {
					case '\r': lookingFor = '\n'; break;
					case '\n': lookingFor = '\0'; break;
					default:
						assert false : "Looking for unexpected char: "
							+ (char)lookingFor;
				}
			} while (lookingFor != '\0' && bb.hasRemaining());

			// CRLF fully consumed: reset for the next element header.
			if (lookingFor == '\0') {
				data = null;
				readOffset = 0;
			}
		}
	}

	public void initialize() {
		String cmd = collectionGet.getCommand();
		String args = collectionGet.stringify();
		ByteBuffer bb = ByteBuffer.allocate(key.length()
				+ cmd.length() + args.length() + 16);

		setArguments(bb, cmd, key, args);
		bb.flip();
		setBuffer(bb);

		if (getLogger().isDebugEnabled()) {
			getLogger().debug("Request in ascii protocol: "
					+ (new String(bb.array())).replace("\r\n", "\\r\\n"));
		}
	}

	@Override
	protected void wasCancelled() {
		getCallback().receivedStatus(GET_CANCELED);
	}

	public Collection getKeys() {
		return Collections.singleton(key);
	}

	public CollectionGet getGet() {
		return collectionGet;
	}
}
b/src/main/java/net/spy/memcached/protocol/ascii/FlushByPrefixOperationImpl.java new file mode 100644 index 000000000..1f226a6c1 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/FlushByPrefixOperationImpl.java @@ -0,0 +1,70 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; + +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Arcus flush by prefix operation. 
+ */ +final class FlushByPrefixOperationImpl extends OperationImpl implements + FlushOperation { + + private static final OperationStatus OK = new OperationStatus(true, "OK"); + private static final OperationStatus NOT_FOUND = new OperationStatus(true, "NOT_FOUND"); + + private final String prefix; + private final int delay; + private final boolean noreply; + + public FlushByPrefixOperationImpl(String prefix, int delay, + boolean noreply, OperationCallback cb) { + super(cb); + this.prefix = prefix; + this.delay = delay; + this.noreply = noreply; + } + + @Override + public void handleLine(String line) { + getLogger().debug("Flush completed successfully"); + getCallback().receivedStatus(matchStatus(line, OK, NOT_FOUND)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + StringBuilder sb = new StringBuilder(); + sb.append("flush_prefix "); + sb.append(prefix); + if (delay != -1) + sb.append(" ").append(delay); + if (noreply) + sb.append(" noreply"); + sb.append("\r\n"); + + ByteBuffer bb = ByteBuffer.allocate(sb.length()); + bb.put(sb.toString().getBytes()); + bb.flip(); + setBuffer(bb); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/FlushOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/FlushOperationImpl.java new file mode 100644 index 000000000..51ed9c82b --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/FlushOperationImpl.java @@ -0,0 +1,49 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; + +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Memcached flush_all operation. 
+ */ +final class FlushOperationImpl extends OperationImpl + implements FlushOperation { + + private static final byte[] FLUSH="flush_all\r\n".getBytes(); + + private static final OperationStatus OK= + new OperationStatus(true, "OK"); + + private final int delay; + + public FlushOperationImpl(int d, OperationCallback cb) { + super(cb); + delay=d; + } + + @Override + public void handleLine(String line) { + getLogger().debug("Flush completed successfully"); + getCallback().receivedStatus(matchStatus(line, OK)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + ByteBuffer b=null; + if(delay == -1) { + b=ByteBuffer.wrap(FLUSH); + } else { + b=ByteBuffer.allocate(32); + b.put( ("flush_all " + delay + "\r\n").getBytes()); + b.flip(); + } + setBuffer(b); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/GetAttrOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/GetAttrOperationImpl.java new file mode 100644 index 000000000..2bab37641 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/GetAttrOperationImpl.java @@ -0,0 +1,105 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.GetAttrOperation; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Implementation of the gets operation. + */ +class GetAttrOperationImpl extends OperationImpl implements GetAttrOperation { + + private static final String CMD = "getattr"; + + private static final OperationStatus ATTR_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus END = new CollectionOperationStatus( + true, "END", CollectionResponse.END); + private static final OperationStatus NOT_FOUND = new CollectionOperationStatus( + false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus ATTR_ERROR_NOT_FOUND = new CollectionOperationStatus( + false, "ATTR_ERROR not found", + CollectionResponse.ATTR_ERROR_NOT_FOUND); + + protected final String key; + protected final GetAttrOperation.Callback cb; + + public GetAttrOperationImpl(String key, GetAttrOperation.Callback cb) { + super(cb); + this.key = key; + this.cb = cb; + } + + @Override + public void handleLine(String line) { + OperationStatus status = matchStatus(line, END, NOT_FOUND, + ATTR_ERROR_NOT_FOUND); + + if (line.startsWith("ATTR ")) { + getLogger().debug("Got line %s", line); + + String[] stuff = line.split(" "); + + assert stuff.length == 2; + assert stuff[0].equals("ATTR"); + + cb.gotAttribute(key, stuff[1]); + } else if (status.isSuccess()) { + getLogger().debug(status); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + } else { + getLogger().debug(status); + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + } + } + + 
@Override + public void initialize() { + int size = CMD.length() + key.length() + 16; + ByteBuffer bb = ByteBuffer.allocate(size); + setArguments(bb, CMD, key); + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(ATTR_CANCELED); + } + + @Override + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/GetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/GetOperationImpl.java new file mode 100644 index 000000000..95ef592d4 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/GetOperationImpl.java @@ -0,0 +1,26 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; + +import net.spy.memcached.ops.GetOperation; + +/** + * Operation for retrieving data. + */ +class GetOperationImpl extends BaseGetOpImpl implements GetOperation { + + private static final String CMD="get"; + + public GetOperationImpl(String key, GetOperation.Callback c) { + super(CMD, c, Collections.singleton(key)); + } + + public GetOperationImpl(Collection k, GetOperation.Callback c) { + super(CMD, c, new HashSet(k)); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/GetsOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/GetsOperationImpl.java new file mode 100644 index 000000000..7c9764a49 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/GetsOperationImpl.java @@ -0,0 +1,18 @@ +package net.spy.memcached.protocol.ascii; + +import java.util.Collections; + +import net.spy.memcached.ops.GetsOperation; + +/** + * Implementation of the gets operation. 
+ */ +class GetsOperationImpl extends BaseGetOpImpl implements GetsOperation { + + private static final String CMD="gets"; + + public GetsOperationImpl(String key, GetsOperation.Callback cb) { + super(CMD, cb, Collections.singleton(key)); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/MutatorOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/MutatorOperationImpl.java new file mode 100644 index 000000000..78253e421 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/MutatorOperationImpl.java @@ -0,0 +1,111 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +/** + * Operation for mutating integers inside of memcached. 
+ */ +final class MutatorOperationImpl extends OperationImpl + implements MutatorOperation { + + public static final int OVERHEAD=32; + + private static final OperationStatus NOT_FOUND= + new OperationStatus(false, "NOT_FOUND"); + + private final Mutator mutator; + private final String key; + private final int amount; + private final long def; + private final int exp; + + public MutatorOperationImpl(Mutator m, String k, int amt, long d, int e, + OperationCallback c) { + super(c); + mutator=m; + key=k; + amount=amt; + def=d; + exp=e; + } + + @Override + public void handleLine(String line) { + getLogger().debug("BTreeGetResult: %s", line); + OperationStatus found=null; + if(line.equals("NOT_FOUND")) { + found=NOT_FOUND; + } else { + found=new OperationStatus(true, line); + } + getCallback().receivedStatus(found); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + int size=KeyUtil.getKeyBytes(key).length + OVERHEAD; + ByteBuffer b=ByteBuffer.allocate(size); + if (def > -1) { + setArguments(b, mutator.name(), key, amount, 0, exp, def); + } else { + setArguments(b, mutator.name(), key, amount); + } + b.flip(); + setBuffer(b); + } + + @Override + protected void wasCancelled() { + // XXX: Replace this comment with why the hell I did this. + getCallback().receivedStatus(CANCELLED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public int getBy() { + return amount; + } + + public long getDefault() { + return -1; + } + + public int getExpiration() { + return -1; + } + + public Mutator getType() { + return mutator; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/OperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/OperationImpl.java new file mode 100644 index 000000000..ab36ae601 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/OperationImpl.java @@ -0,0 +1,160 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
package net.spy.memcached.protocol.ascii;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import net.spy.memcached.KeyUtil;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationCallback;
import net.spy.memcached.ops.OperationErrorType;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.OperationStatus;
import net.spy.memcached.protocol.BaseOperationImpl;

/**
 * Operations on a memcached connection.
 *
 * Base class for all ascii-protocol operations.  Provides line-oriented
 * response reading (bytes accumulate until a CRLF, then {@link #handleLine}
 * fires), error classification, and helpers for building request buffers.
 */
abstract class OperationImpl extends BaseOperationImpl implements Operation {

	protected static final byte[] CRLF={'\r', '\n'};
	// Response lines are decoded as UTF-8.
	private static final String CHARSET = "UTF-8";

	// Accumulates partial-line bytes across readFromBuffer() calls.
	private final ByteArrayOutputStream byteBuffer=new ByteArrayOutputStream();
	// LINE: feed complete lines to handleLine(); DATA: feed raw bytes to handleRead().
	OperationReadType readType=OperationReadType.LINE;
	// True when a '\r' was seen and its '\n' has not arrived yet.
	boolean foundCr=false;

	protected OperationImpl() {
		super();
	}

	protected OperationImpl(OperationCallback cb) {
		super();
		callback=cb;
	}

	/**
	 * Match the status line provided against one of the given
	 * OperationStatus objects. If none match, return a failure status with
	 * the given line.
	 *
	 * @param line the current line
	 * @param statii several status objects
	 * @return the appropriate status object
	 */
	protected final OperationStatus matchStatus(String line,
			OperationStatus... statii) {
		OperationStatus rv=null;
		// No early break: if several statii had the same message the last
		// one would win, but callers pass distinct messages.
		for(OperationStatus status : statii) {
			if(line.equals(status.getMessage())) {
				rv=status;
			}
		}
		if(rv == null) {
			// Unrecognized line: surface it verbatim as a failure.
			rv=new OperationStatus(false, line);
		}
		return rv;
	}

	/* (non-Javadoc)
	 * @see net.spy.memcached.protocol.ascii.Operation#getReadType()
	 */
	protected final OperationReadType getReadType() {
		return readType;
	}

	/**
	 * Set the read type of this operation.
	 */
	protected final void setReadType(OperationReadType to) {
		readType=to;
	}

	/**
	 * Set some arguments for an operation into the given byte buffer.
	 * Arguments are stringified, space-separated, and terminated with CRLF.
	 * An empty-string argument contributes no separator and no bytes.
	 */
	protected final void setArguments(ByteBuffer bb, Object... args) {
		boolean wasFirst=true;
		for(Object o : args) {
			String s = String.valueOf(o);
			if(wasFirst) {
				wasFirst=false;
			} else if (!"".equals(s)) {
				bb.put((byte)' ');
			}
			bb.put(KeyUtil.getKeyBytes(s));
		}
		bb.put(CRLF);
	}

	// Map a server error line to its error category, or null for non-errors.
	OperationErrorType classifyError(String line) {
		OperationErrorType rv=null;
		if(line.startsWith("ERROR")) {
			rv=OperationErrorType.GENERAL;
		} else if(line.startsWith("CLIENT_ERROR")) {
			rv=OperationErrorType.CLIENT;
		} else if(line.startsWith("SERVER_ERROR")) {
			rv=OperationErrorType.SERVER;
		}
		return rv;
	}

	@Override
	public void readFromBuffer(ByteBuffer data) throws IOException {
		// Loop while there's data remaining to get it all drained.
		while(getState() != OperationState.COMPLETE && data.remaining() > 0) {
			if(readType == OperationReadType.DATA) {
				// Raw-data mode: the concrete operation consumes bytes itself.
				handleRead(data);
			} else {
				// Line mode: accumulate until a full CRLF-terminated line.
				// offset >= 0 signals that a complete line was found.
				int offset=-1;
				for(int i=0; data.remaining() > 0; i++) {
					byte b=data.get();
					if(b == '\r') {
						foundCr=true;
					} else if(b == '\n') {
						assert foundCr: "got a \\n without a \\r";
						offset=i;
						foundCr=false;
						break;
					} else {
						assert !foundCr : "got a \\r without a \\n";
						byteBuffer.write(b);
					}
				}
				if(offset >= 0) {
					String line=new String(byteBuffer.toByteArray(), CHARSET);
					byteBuffer.reset();
					OperationErrorType eType=classifyError(line);
					if(eType != null) {
						handleError(eType, line);
					} else {
						handleLine(line);
					}
				}
			}
		}
	}

	/* (non-Javadoc)
	 * @see net.spy.memcached.protocol.ascii.Operation#handleLine(java.lang.String)
	 */
	public abstract void handleLine(String line);
}
+ */ + DATA +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/ascii/OptimizedGetImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/OptimizedGetImpl.java new file mode 100644 index 000000000..5a930e76e --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/OptimizedGetImpl.java @@ -0,0 +1,31 @@ +package net.spy.memcached.protocol.ascii; + +import java.util.HashSet; + +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.protocol.ProxyCallback; + +/** + * Optimized Get operation for folding a bunch of gets together. + */ +final class OptimizedGetImpl extends GetOperationImpl { + + private final ProxyCallback pcb; + + /** + * Construct an optimized get starting with the given get operation. + */ + public OptimizedGetImpl(GetOperation firstGet) { + super(new HashSet(), new ProxyCallback()); + pcb=(ProxyCallback)getCallback(); + addOperation(firstGet); + } + + /** + * Add a new GetOperation to get. + */ + public void addOperation(GetOperation o) { + getKeys().addAll(o.getKeys()); + pcb.addCallbacks(o); + } +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/SetAttrOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/SetAttrOperationImpl.java new file mode 100644 index 000000000..deaa35104 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/SetAttrOperationImpl.java @@ -0,0 +1,100 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.ops.CollectionOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.SetAttrOperation; + +class SetAttrOperationImpl extends OperationImpl + implements SetAttrOperation { + + private static final int OVERHEAD = 64; + + private static final OperationStatus ATTR_CANCELED = new CollectionOperationStatus( + false, "collection canceled", CollectionResponse.CANCELED); + + private static final OperationStatus OK = + new CollectionOperationStatus(true, "OK", CollectionResponse.OK); + private static final OperationStatus NOT_FOUND = + new CollectionOperationStatus(false, "NOT_FOUND", CollectionResponse.NOT_FOUND); + private static final OperationStatus ATTR_ERROR_NOT_FOUND = + new CollectionOperationStatus(false, "ATTR_ERROR not found", CollectionResponse.ATTR_ERROR_NOT_FOUND); + private static final OperationStatus ATTR_ERROR_BAD_VALUE = + new CollectionOperationStatus(false, "ATTR_ERROR bad value", CollectionResponse.ATTR_ERROR_BAD_VALUE); + + protected final String key; + protected final Attributes attrs; + + public SetAttrOperationImpl(String key, Attributes attrs, + OperationCallback cb) { + super(cb); + this.key = key; + // If no attributes given, set to default values + this.attrs = (attrs == null)? 
new CollectionAttributes() : attrs; + } + + @Override + public void handleLine(String line) { + assert getState() == OperationState.READING + : "Read ``" + line + "'' when in " + getState() + " state"; + getCallback().receivedStatus( + matchStatus(line, OK, NOT_FOUND, ATTR_ERROR_NOT_FOUND, + ATTR_ERROR_BAD_VALUE)); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + ByteBuffer bb=ByteBuffer.allocate(KeyUtil.getKeyBytes(key).length + + attrs.getLength() + OVERHEAD); + + setArguments(bb, "setattr", key, attrs); + + bb.flip(); + setBuffer(bb); + + if (getLogger().isDebugEnabled()) { + getLogger().debug("Request in ascii protocol: " + + (new String(bb.array())).replace("\r\n", "\\r\\n")); + } + } + + @Override + protected void wasCancelled() { + getCallback().receivedStatus(ATTR_CANCELED); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public Attributes getAttributes() { + return attrs; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/StatsOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/StatsOperationImpl.java new file mode 100644 index 000000000..22ddc2938 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/StatsOperationImpl.java @@ -0,0 +1,72 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; + +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StatsOperation; + +/** + * Operation to retrieve statistics from a memcached server. + */ +final class StatsOperationImpl extends OperationImpl + implements StatsOperation { + + private static final OperationStatus END=new OperationStatus(true, "END"); + + private static final byte[] MSG="stats\r\n".getBytes(); + + private final byte[] msg; + private final StatsOperation.Callback cb; + + public StatsOperationImpl(String arg, StatsOperation.Callback c) { + super(c); + cb=c; + if(arg == null) { + msg=MSG; + } else { + msg=("stats " + arg + "\r\n").getBytes(); + } + } + + @Override + public void handleLine(String line) { + if(line.startsWith("END")) { + cb.receivedStatus(END); + transitionState(OperationState.COMPLETE); + } else { + String[] parts=line.split(" ", 3); + assert parts.length == 3; + cb.gotStat(parts[1], parts[2]); + } + } + + @Override + public void initialize() { + setBuffer(ByteBuffer.wrap(msg)); + } + + @Override + protected void wasCancelled() { + cb.receivedStatus(CANCELLED); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/StoreOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/StoreOperationImpl.java new file mode 100644 index 000000000..9714846bf --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/StoreOperationImpl.java @@ -0,0 +1,28 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; + +/** + * Operation to store data in a memcached server. 
+ */ +final class StoreOperationImpl extends BaseStoreOperationImpl + implements StoreOperation { + + private final StoreType storeType; + + public StoreOperationImpl(StoreType t, String k, int f, int e, + byte[] d, OperationCallback cb) { + super(t.name(), k, f, e, d, cb); + storeType = t; + } + + public StoreType getStoreType() { + return storeType; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/VersionOperationImpl.java b/src/main/java/net/spy/memcached/protocol/ascii/VersionOperationImpl.java new file mode 100644 index 000000000..517d3c5f7 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/VersionOperationImpl.java @@ -0,0 +1,38 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + +import java.nio.ByteBuffer; + +import net.spy.memcached.ops.NoopOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.VersionOperation; + +/** + * Operation to request the version of a memcached server. 
+ */ +final class VersionOperationImpl extends OperationImpl + implements VersionOperation, NoopOperation { + + private static final byte[] REQUEST="version\r\n".getBytes(); + + public VersionOperationImpl(OperationCallback c) { + super(c); + } + + @Override + public void handleLine(String line) { + assert line.startsWith("VERSION "); + getCallback().receivedStatus( + new OperationStatus(true, line.substring("VERSION ".length()))); + transitionState(OperationState.COMPLETE); + } + + @Override + public void initialize() { + setBuffer(ByteBuffer.wrap(REQUEST)); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/ascii/package.html b/src/main/java/net/spy/memcached/protocol/ascii/package.html new file mode 100644 index 000000000..116740d16 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/ascii/package.html @@ -0,0 +1,14 @@ + + + + + + Low-level operations for the memcached ascii protocol + + + +

Low-level operations for the memcached ascii protocol

package net.spy.memcached.protocol.binary;

import java.net.SocketAddress;
import java.nio.channels.SocketChannel;
import java.util.concurrent.BlockingQueue;

import net.spy.memcached.ops.CASOperation;
import net.spy.memcached.ops.GetOperation;
import net.spy.memcached.ops.Operation;
import net.spy.memcached.ops.OperationState;
import net.spy.memcached.ops.StoreOperation;
import net.spy.memcached.protocol.ProxyCallback;
import net.spy.memcached.protocol.TCPMemcachedNodeImpl;

/**
 * Implementation of MemcachedNode for speakers of the binary protocol.
 *
 * Folds consecutive queued get operations (and consecutive set/CAS
 * operations) into single optimized multi-operations before writing.
 */
public class BinaryMemcachedNodeImpl extends TCPMemcachedNodeImpl {

	// Caps on how many sets, and how many payload bytes, may be folded into
	// one optimized multi-set.
	private final int MAX_SET_OPTIMIZATION_COUNT = 65535;
	private final int MAX_SET_OPTIMIZATION_BYTES = 2 * 1024 * 1024;

	public BinaryMemcachedNodeImpl(SocketAddress sa, SocketChannel c,
			int bufSize, BlockingQueue rq,
			BlockingQueue wq, BlockingQueue iq,
			Long opQueueMaxBlockTimeNs, boolean waitForAuth) {
		super(sa, c, bufSize, rq, wq, iq, opQueueMaxBlockTimeNs,
				waitForAuth);
	}

	// Dispatch on the type of the operation at the head of the write queue.
	@Override
	protected void optimize() {
		Operation firstOp = writeQ.peek();
		if(firstOp instanceof GetOperation) {
			optimizeGets();
		} else if(firstOp instanceof CASOperation) {
			optimizeSets();
		}
	}

	private void optimizeGets() {
		// make sure there are at least two get operations in a row before
		// attempting to optimize them.
		optimizedOp=writeQ.remove();
		if(writeQ.peek() instanceof GetOperation) {
			OptimizedGetImpl og=new OptimizedGetImpl(
					(GetOperation)optimizedOp);
			optimizedOp=og;

			// Drain every consecutive get; cancelled ones are dropped.
			while(writeQ.peek() instanceof GetOperation) {
				GetOperation o=(GetOperation) writeQ.remove();
				if(!o.isCancelled()) {
					og.addOperation(o);
				}
			}

			// Initialize the new mega get
			optimizedOp.initialize();
			assert optimizedOp.getState() == OperationState.WRITING;
			ProxyCallback pcb=(ProxyCallback) og.getCallback();
			getLogger().debug("Set up %s with %s keys and %s callbacks",
					this, pcb.numKeys(), pcb.numCallbacks());
		}
	}

	private void optimizeSets() {
		// make sure there are at least two store operations in a row before
		// attempting to optimize them.
		optimizedOp=writeQ.remove();
		if(writeQ.peek() instanceof CASOperation) {
			OptimizedSetImpl og=new OptimizedSetImpl(
					(CASOperation)optimizedOp);
			optimizedOp=og;

			// NOTE(review): the peek checks StoreOperation but the remove is
			// cast to CASOperation — this relies on every queued binary
			// store operation also implementing CASOperation; confirm
			// against the binary StoreOperationImpl hierarchy.
			while(writeQ.peek() instanceof StoreOperation
					&& og.size() < MAX_SET_OPTIMIZATION_COUNT
					&& og.bytes() < MAX_SET_OPTIMIZATION_BYTES) {
				CASOperation o=(CASOperation) writeQ.remove();
				if(!o.isCancelled()) {
					og.addOperation(o);
				}
			}

			// Initialize the new mega set
			optimizedOp.initialize();
			assert optimizedOp.getState() == OperationState.WRITING;
		}
	}
}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.protocol.binary; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; + +import net.spy.memcached.collection.Attributes; +import net.spy.memcached.collection.BTreeFindPosition; +import net.spy.memcached.collection.BTreeGetBulk; +import net.spy.memcached.collection.BTreeGetByPosition; +import net.spy.memcached.collection.BTreeSMGet; +import net.spy.memcached.collection.BTreeStoreAndGet; +import net.spy.memcached.collection.CollectionBulkStore; +import net.spy.memcached.collection.CollectionCount; +import net.spy.memcached.collection.CollectionCreate; +import net.spy.memcached.collection.CollectionDelete; +import net.spy.memcached.collection.CollectionExist; +import net.spy.memcached.collection.CollectionGet; +import net.spy.memcached.collection.CollectionMutate; +import net.spy.memcached.collection.CollectionPipedStore; +import net.spy.memcached.collection.CollectionPipedUpdate; +import net.spy.memcached.collection.CollectionStore; +import net.spy.memcached.collection.CollectionUpdate; +import net.spy.memcached.collection.SetPipedExist; +import net.spy.memcached.ops.BTreeFindPositionOperation; +import net.spy.memcached.ops.BTreeGetBulkOperation; +import net.spy.memcached.ops.BTreeGetByPositionOperation; +import net.spy.memcached.ops.BTreeSortMergeGetOperation; +import net.spy.memcached.ops.BTreeStoreAndGetOperation; +import net.spy.memcached.ops.BaseOperationFactory; +import 
net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.CollectionBulkStoreOperation; +import net.spy.memcached.ops.CollectionCountOperation; +import net.spy.memcached.ops.CollectionCreateOperation; +import net.spy.memcached.ops.CollectionDeleteOperation; +import net.spy.memcached.ops.CollectionExistOperation; +import net.spy.memcached.ops.CollectionGetOperation; +import net.spy.memcached.ops.CollectionMutateOperation; +import net.spy.memcached.ops.CollectionPipedExistOperation; +import net.spy.memcached.ops.CollectionPipedStoreOperation; +import net.spy.memcached.ops.CollectionPipedUpdateOperation; +import net.spy.memcached.ops.CollectionStoreOperation; +import net.spy.memcached.ops.CollectionUpdateOperation; +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.ExtendedBTreeGetOperation; +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.GetAttrOperation; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetOperation.Callback; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.KeyedOperation; +import net.spy.memcached.ops.MultiGetOperationCallback; +import net.spy.memcached.ops.MultiGetsOperationCallback; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.NoopOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.SASLAuthOperation; +import net.spy.memcached.ops.SASLMechsOperation; +import net.spy.memcached.ops.SASLStepOperation; +import net.spy.memcached.ops.SetAttrOperation; +import net.spy.memcached.ops.StatsOperation; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; +import net.spy.memcached.ops.VersionOperation; + +/** + * Factory for binary operations. 
+ */ +public class BinaryOperationFactory extends BaseOperationFactory { + + public DeleteOperation delete(String key, + OperationCallback operationCallback) { + return new DeleteOperationImpl(key, operationCallback); + } + + public FlushOperation flush(int delay, OperationCallback cb) { + return new FlushOperationImpl(cb); + } + + public GetOperation get(String key, Callback callback) { + return new GetOperationImpl(key, callback); + } + + public GetOperation get(Collection value, Callback cb) { + return new MultiGetOperationImpl(value, cb); + } + + public GetsOperation gets(String key, GetsOperation.Callback cb) { + return new GetOperationImpl(key, cb); + } + + public MutatorOperation mutate(Mutator m, String key, int by, + long def, int exp, OperationCallback cb) { + return new MutatorOperationImpl(m, key, by, def, exp, cb); + } + + public StatsOperation stats(String arg, + net.spy.memcached.ops.StatsOperation.Callback cb) { + return new StatsOperationImpl(arg, cb); + } + + public StoreOperation store(StoreType storeType, String key, int flags, + int exp, byte[] data, OperationCallback cb) { + return new StoreOperationImpl(storeType, key, flags, exp, data, 0, cb); + } + + public VersionOperation version(OperationCallback cb) { + return new VersionOperationImpl(cb); + } + + public NoopOperation noop(OperationCallback cb) { + return new NoopOperationImpl(cb); + } + + public CASOperation cas(StoreType type, String key, long casId, int flags, + int exp, byte[] data, OperationCallback cb) { + return new StoreOperationImpl(type, key, flags, exp, data, + casId, cb); + } + + public ConcatenationOperation cat(ConcatenationType catType, long casId, + String key, byte[] data, OperationCallback cb) { + return new ConcatenationOperationImpl(catType, key, data, casId, cb); + } + + @Override + protected Collection cloneGet(KeyedOperation op) { + Collection rv=new ArrayList(); + GetOperation.Callback getCb = null; + GetsOperation.Callback getsCb = null; + if(op.getCallback() 
instanceof GetOperation.Callback) { + getCb=new MultiGetOperationCallback( + op.getCallback(), op.getKeys().size()); + } else { + getsCb=new MultiGetsOperationCallback( + op.getCallback(), op.getKeys().size()); + } + for(String k : op.getKeys()) { + rv.add(getCb == null ? gets(k, getsCb) : get(k, getCb)); + } + return rv; + } + + public SASLAuthOperation saslAuth(String[] mech, String serverName, + Map props, CallbackHandler cbh, OperationCallback cb) { + return new SASLAuthOperationImpl(mech, serverName, props, cbh, cb); + } + + public SASLMechsOperation saslMechs(OperationCallback cb) { + return new SASLMechsOperationImpl(cb); + } + + public SASLStepOperation saslStep(String[] mech, byte[] challenge, + String serverName, Map props, CallbackHandler cbh, + OperationCallback cb) { + return new SASLStepOperationImpl(mech, challenge, serverName, + props, cbh, cb); + } + + //// UNSUPPORTED //// + + public SetAttrOperation setAttr(String key, Attributes attrs, + OperationCallback cb) { + throw new RuntimeException( + "SetAttrOperation is not supported in binary protocol yet."); + } + + public GetAttrOperation getAttr(String key, + net.spy.memcached.ops.GetAttrOperation.Callback cb) { + throw new RuntimeException( + "GetAttrOperation is not supported in binary protocol yet."); + } + + public CollectionStoreOperation collectionStore(String key, String subkey, + CollectionStore collectionStore, byte[] data, + OperationCallback cb) { + throw new RuntimeException( + "CollectionStoreOperation is not supported in binary protocol yet."); + } + + public CollectionStoreOperation collectionStore(String key, byte[] subkey, + CollectionStore collectionStore, byte[] data, + OperationCallback cb) { + throw new RuntimeException( + "CollectionStoreOperation is not supported in binary protocol yet."); + } + + public CollectionPipedStoreOperation collectionPipedStore(String key, + CollectionPipedStore store, OperationCallback cb) { + throw new RuntimeException( + 
"CollectionPipedStoreOperation is not supported in binary protocol yet."); + } + + public CollectionGetOperation collectionGet(String key, + CollectionGet collectionGet, + net.spy.memcached.ops.CollectionGetOperation.Callback cb) { + throw new RuntimeException( + "CollectionGetOperation is not supported in binary protocol yet."); + } + + public CollectionGetOperation collectionGet2(String key, + CollectionGet collectionGet, ExtendedBTreeGetOperation.Callback cb) { + throw new RuntimeException( + "CollectionGetOperation is not supported in binary protocol yet."); + } + + public CollectionDeleteOperation collectionDelete(String key, + CollectionDelete collectionDelete, OperationCallback cb) { + throw new RuntimeException( + "CollectionDeleteOperation is not supported in binary protocol yet."); + } + + public CollectionExistOperation collectionExist(String key, String subkey, + CollectionExist collectionExist, OperationCallback cb) { + throw new RuntimeException( + "CollectionExistOperation is not supported in binary protocol yet."); + } + + public CollectionCreateOperation collectionCreate(String key, + CollectionCreate collectionCreate, OperationCallback cb) { + throw new RuntimeException( + "CollectionCreateOperation is not supported in binary protocol yet."); + } + + public CollectionCountOperation collectionCount(String key, + CollectionCount collectionCount, OperationCallback cb) { + throw new RuntimeException( + "CollectionCountOperation is not supported in binary protocol yet."); + } + + public FlushOperation flush(String prefix, int delay, boolean noreply, OperationCallback cb) { + throw new RuntimeException( + "Flush by prefix operation is not supported in binary protocol yet."); + } + + @Override + public BTreeSortMergeGetOperation bopsmget(BTreeSMGet smGet, + BTreeSortMergeGetOperation.Callback cb) { + throw new RuntimeException( + "B+ tree sort merge get operation is not supported in binary protocol yet."); + } + + @Override + public 
CollectionStoreOperation collectionUpsert(String key, String subkey, + CollectionStore collectionStore, byte[] data, + OperationCallback cb) { + throw new RuntimeException( + "B+ tree upsert operation is not supported in binary protocol yet."); + } + + @Override + public CollectionUpdateOperation collectionUpdate(String key, + String subkey, CollectionUpdate collectionUpdate, byte[] data, + OperationCallback cb) { + throw new RuntimeException( + "Collection update operation is not supported in binary protocol yet."); + } + + @Override + public CollectionPipedUpdateOperation collectionPipedUpdate(String key, + CollectionPipedUpdate update, OperationCallback cb) { + throw new RuntimeException( + "CollectionPipedStoreOperation is not supported in binary protocol yet."); + } + + @Override + public CollectionMutateOperation collectionMutate(String key, + String subkey, CollectionMutate collectionMutate, OperationCallback cb) { + throw new RuntimeException( + "Collection mutate(incr/decr) operation is not supported in binary protocol yet."); + } + + @Override + public CollectionPipedExistOperation collectionPipedExist(String key, + SetPipedExist exist, OperationCallback cb) { + throw new RuntimeException( + "Collection piped exist operation is not supported in binary protocol yet."); + } + + @Override + public CollectionBulkStoreOperation collectionBulkStore( + List key, CollectionBulkStore store, + OperationCallback cb) { + throw new RuntimeException( + "Collection piped store2 operation is not supported in binary protocol yet."); + } + + @Override + public BTreeGetBulkOperation bopGetBulk(BTreeGetBulk get, + BTreeGetBulkOperation.Callback cb) { + throw new RuntimeException( + "BTree get bulk operation is not supported in binary protocol yet."); + } + + @Override + public BTreeGetByPositionOperation bopGetByPosition(String key, + BTreeGetByPosition get, OperationCallback cb) { + throw new RuntimeException( + "BTree get by position operation is not supported in binary 
protocol yet."); + } + + @Override + public BTreeFindPositionOperation bopFindPosition(String key, + BTreeFindPosition get, OperationCallback cb) { + throw new RuntimeException( + "BTree find position operation is not supported in binary protocol yet."); + } + + @Override + public BTreeStoreAndGetOperation bopStoreAndGet(String key, + BTreeStoreAndGet get, byte[] dataToStore, OperationCallback cb) { + throw new RuntimeException( + "BTree store and get operation is not supported in binary protocol yet."); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/ConcatenationOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/ConcatenationOperationImpl.java new file mode 100644 index 000000000..49a025cee --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/ConcatenationOperationImpl.java @@ -0,0 +1,80 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; + +class ConcatenationOperationImpl extends OperationImpl + implements ConcatenationOperation { + + private static final int APPEND=0x0e; + private static final int PREPEND=0x0f; + + private final String key; + private final long cas; + private final ConcatenationType catType; + private final byte[] data; + + private static int cmdMap(ConcatenationType t) { + int rv=-1; + switch(t) { + case append: rv=APPEND; break; + case prepend: rv=PREPEND; break; + } + // Check fall-through. 
+ assert rv != -1 : "Unhandled store type: " + t; + return rv; + } + + public ConcatenationOperationImpl(ConcatenationType t, String k, + byte[] d, long c, OperationCallback cb) { + super(cmdMap(t), generateOpaque(), cb); + key=k; + data=d; + cas=c; + catType=t; + } + + @Override + public void initialize() { + prepareBuffer(key, cas, data); + } + + @Override + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + OperationStatus rv=null; + switch(errCode) { + case ERR_EXISTS: + rv=EXISTS_STATUS; + break; + case ERR_NOT_FOUND: + rv=NOT_FOUND_STATUS; + break; + case ERR_NOT_STORED: + rv=NOT_FOUND_STATUS; + break; + } + return rv; + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public long getCasValue() { + return cas; + } + + public byte[] getData() { + return data; + } + + public ConcatenationType getStoreType() { + return catType; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/DeleteOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/DeleteOperationImpl.java new file mode 100644 index 000000000..8f3491bac --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/DeleteOperationImpl.java @@ -0,0 +1,42 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; + +class DeleteOperationImpl extends OperationImpl implements + DeleteOperation { + + private static final int CMD=4; + + private final String key; + private final long cas; + + public DeleteOperationImpl(String k, OperationCallback cb) { + this(k, 0, cb); + } + + public DeleteOperationImpl(String k, long c, OperationCallback cb) { + super(CMD, generateOpaque(), cb); + key=k; + cas=c; + } + + @Override + public void initialize() { + prepareBuffer(key, cas, EMPTY_BYTES); + } + + @Override + protected 
OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + return errCode == ERR_NOT_FOUND ? NOT_FOUND_STATUS : null; + } + + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/FlushOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/FlushOperationImpl.java new file mode 100644 index 000000000..406a2347a --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/FlushOperationImpl.java @@ -0,0 +1,25 @@ +package net.spy.memcached.protocol.binary; + +import net.spy.memcached.ops.FlushOperation; +import net.spy.memcached.ops.OperationCallback; + +class FlushOperationImpl extends OperationImpl implements FlushOperation { + + private static final int CMD=8; + private final int delay; + + public FlushOperationImpl(OperationCallback cb) { + this(0, cb); + } + + public FlushOperationImpl(int d, OperationCallback cb) { + super(CMD, generateOpaque(), cb); + delay=d; + } + + @Override + public void initialize() { + prepareBuffer("", 0, EMPTY_BYTES, delay); + } + +} \ No newline at end of file diff --git a/src/main/java/net/spy/memcached/protocol/binary/GetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/GetOperationImpl.java new file mode 100644 index 000000000..19ee47d21 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/GetOperationImpl.java @@ -0,0 +1,62 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.OperationStatus; + +class GetOperationImpl extends OperationImpl + implements GetOperation, GetsOperation { + + static final int CMD=0; + + /** + * Length of the extra header stuff for a GET response. 
+ */ + static final int EXTRA_HDR_LEN=4; + + private final String key; + + public GetOperationImpl(String k, GetOperation.Callback cb) { + super(CMD, generateOpaque(), cb); + key=k; + } + + public GetOperationImpl(String k, GetsOperation.Callback cb) { + super(CMD, generateOpaque(), cb); + key=k; + } + + @Override + public void initialize() { + prepareBuffer(key, 0, EMPTY_BYTES); + } + + @Override + protected void decodePayload(byte[] pl) { + final int flags=decodeInt(pl, 0); + final byte[] data=new byte[pl.length - EXTRA_HDR_LEN]; + System.arraycopy(pl, EXTRA_HDR_LEN, data, 0, pl.length-EXTRA_HDR_LEN); + // Assume we're processing a get unless the cast fails. + try { + GetOperation.Callback cb=(GetOperation.Callback)getCallback(); + cb.gotData(key, flags, data); + } catch(ClassCastException e) { + GetsOperation.Callback cb=(GetsOperation.Callback)getCallback(); + cb.gotData(key, flags, responseCas, data); + } + getCallback().receivedStatus(STATUS_OK); + } + + @Override + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + return errCode == ERR_NOT_FOUND ? 
NOT_FOUND_STATUS : null; + } + + public Collection getKeys() { + return Collections.singleton(key); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/MultiGetOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/MultiGetOperationImpl.java new file mode 100644 index 000000000..b0feed752 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/MultiGetOperationImpl.java @@ -0,0 +1,116 @@ +package net.spy.memcached.protocol.binary; + +import static net.spy.memcached.protocol.binary.GetOperationImpl.EXTRA_HDR_LEN; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; + +class MultiGetOperationImpl extends OperationImpl implements GetOperation { + + private static final int CMD_GETQ=9; + + private final Map keys=new HashMap(); + private final Map bkeys=new HashMap(); + private final Map rkeys=new HashMap(); + + private final int terminalOpaque=generateOpaque(); + + public MultiGetOperationImpl(Collection k, OperationCallback cb) { + super(-1, -1, cb); + for(String s : new HashSet(k)) { + addKey(s); + } + } + + /** + * Add a key (and return its new opaque value). 
+ */ + protected int addKey(String k) { + Integer rv=rkeys.get(k); + if(rv == null) { + rv=generateOpaque(); + keys.put(rv, k); + bkeys.put(rv, KeyUtil.getKeyBytes(k)); + rkeys.put(k, rv); + } + return rv; + } + + @Override + public void initialize() { + int size=(1+keys.size()) * MIN_RECV_PACKET; + for(byte[] b : bkeys.values()) { + size += b.length; + } + // set up the initial header stuff + ByteBuffer bb=ByteBuffer.allocate(size); + for(Map.Entry me : bkeys.entrySet()) { + final byte[] keyBytes=me.getValue(); + + // Custom header + bb.put(REQ_MAGIC); + bb.put((byte)CMD_GETQ); + bb.putShort((short)keyBytes.length); + bb.put((byte)0); // extralen + bb.put((byte)0); // data type + bb.putShort((short)0); // reserved + bb.putInt(keyBytes.length); + bb.putInt(me.getKey()); + bb.putLong(0); // cas + // the actual key + bb.put(keyBytes); + } + // Add the noop + bb.put(REQ_MAGIC); + bb.put((byte)NoopOperationImpl.CMD); + bb.putShort((short)0); + bb.put((byte)0); // extralen + bb.put((byte)0); // data type + bb.putShort((short)0); // reserved + bb.putInt(0); + bb.putInt(terminalOpaque); + bb.putLong(0); // cas + + bb.flip(); + setBuffer(bb); + } + + @Override + protected void finishedPayload(byte[] pl) throws IOException { + if(responseOpaque == terminalOpaque) { + getCallback().receivedStatus(STATUS_OK); + transitionState(OperationState.COMPLETE); + } else if(errorCode != 0) { + getLogger().warn("Error on key %s: %s (%d)", + keys.get(responseOpaque), new String(pl), errorCode); + } else { + final int flags=decodeInt(pl, 0); + final byte[] data=new byte[pl.length - EXTRA_HDR_LEN]; + System.arraycopy(pl, EXTRA_HDR_LEN, data, + 0, pl.length-EXTRA_HDR_LEN); + Callback cb=(Callback)getCallback(); + cb.gotData(keys.get(responseOpaque), flags, data); + } + resetInput(); + } + + @Override + protected boolean opaqueIsValid() { + return responseOpaque == terminalOpaque + || keys.containsKey(responseOpaque); + } + + public Collection getKeys() { + return keys.values(); + } + +} 
diff --git a/src/main/java/net/spy/memcached/protocol/binary/MutatorOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/MutatorOperationImpl.java new file mode 100644 index 000000000..b4d88752b --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/MutatorOperationImpl.java @@ -0,0 +1,80 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; + +class MutatorOperationImpl extends OperationImpl implements + MutatorOperation { + + private static final int CMD_INCR=5; + private static final int CMD_DECR=6; + + private final Mutator mutator; + private final String key; + private final long by; + private final int exp; + private final long def; + + public MutatorOperationImpl(Mutator m, String k, long b, + long d, int e, OperationCallback cb) { + super(m == Mutator.incr ? CMD_INCR : CMD_DECR, generateOpaque(), cb); + assert d >= 0 : "Default value is below zero"; + mutator=m; + key=k; + by=b; + exp=e; + def=d; + } + + @Override + public void initialize() { + // We're passing around a long so we can cover an unsigned integer. + byte[] defBytes=new byte[8]; + defBytes[0]=(byte)((def >> 56) & 0xff); + defBytes[1]=(byte)((def >> 48) & 0xff); + defBytes[2]=(byte)((def >> 40) & 0xff); + defBytes[3]=(byte)((def >> 32) & 0xff); + defBytes[4]=(byte)((def >> 24) & 0xff); + defBytes[5]=(byte)((def >> 16) & 0xff); + defBytes[6]=(byte)((def >> 8) & 0xff); + defBytes[7]=(byte)(def & 0xff); + prepareBuffer(key, 0, EMPTY_BYTES, by, defBytes, exp); + } + + @Override + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + return errCode == ERR_NOT_FOUND ? 
NOT_FOUND_STATUS : null; + } + + @Override + protected void decodePayload(byte[] pl) { + getCallback().receivedStatus(new OperationStatus(true, + String.valueOf(decodeLong(pl, 0)))); + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public int getBy() { + return (int) by; + } + + public long getDefault() { + return def; + } + + public int getExpiration() { + return exp; + } + + public Mutator getType() { + return mutator; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/NoopOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/NoopOperationImpl.java new file mode 100644 index 000000000..dc2590109 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/NoopOperationImpl.java @@ -0,0 +1,22 @@ +package net.spy.memcached.protocol.binary; + +import net.spy.memcached.ops.NoopOperation; +import net.spy.memcached.ops.OperationCallback; + +/** + * Implementation of a noop operation. + */ +class NoopOperationImpl extends OperationImpl implements NoopOperation { + + static final int CMD=10; + + public NoopOperationImpl(OperationCallback cb) { + super(CMD, generateOpaque(), cb); + } + + @Override + public void initialize() { + prepareBuffer("", 0, EMPTY_BYTES); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/OperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/OperationImpl.java new file mode 100644 index 000000000..7796a603a --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/OperationImpl.java @@ -0,0 +1,295 @@ +package net.spy.memcached.protocol.binary; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.concurrent.atomic.AtomicInteger; + +import net.spy.memcached.CASResponse; +import net.spy.memcached.KeyUtil; +import net.spy.memcached.ops.CASOperationStatus; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationErrorType; +import 
net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.protocol.BaseOperationImpl; + +/** + * Base class for binary operations. + */ +abstract class OperationImpl extends BaseOperationImpl { + + protected static final byte REQ_MAGIC = (byte)0x80; + protected static final byte RES_MAGIC = (byte)0x81; + protected static final int MIN_RECV_PACKET=24; + + /** + * Error code for items that were not found. + */ + protected static final int ERR_NOT_FOUND = 1; + protected static final int ERR_EXISTS = 2; + protected static final int ERR_EINVAL = 4; + protected static final int ERR_NOT_STORED = 5; + + protected static final OperationStatus NOT_FOUND_STATUS = + new CASOperationStatus(false, "Not Found", CASResponse.NOT_FOUND); + protected static final OperationStatus EXISTS_STATUS = + new CASOperationStatus(false, "Object exists", CASResponse.EXISTS); + protected static final OperationStatus NOT_STORED_STATUS = + new CASOperationStatus(false, "Not Stored", CASResponse.NOT_FOUND); + + protected static final byte[] EMPTY_BYTES = new byte[0]; + + protected static final OperationStatus STATUS_OK = + new CASOperationStatus(true, "OK", CASResponse.OK); + + private static final AtomicInteger seqNumber=new AtomicInteger(0); + + // request header fields + private final int cmd; + protected final int opaque; + + private final byte[] header=new byte[MIN_RECV_PACKET]; + private int headerOffset=0; + private byte[] payload=null; + + // Response header fields + protected int keyLen; + protected int responseCmd; + protected int errorCode; + protected int responseOpaque; + protected long responseCas; + + private int payloadOffset=0; + + /** + * Construct with opaque. + * + * @param o the opaque value. 
+ * @param cb + */ + protected OperationImpl(int c, int o, OperationCallback cb) { + super(); + cmd=c; + opaque=o; + setCallback(cb); + } + + protected void resetInput() { + payload=null; + payloadOffset=0; + headerOffset=0; + } + + // Base response packet format: + // 0 1 2 3 4 5 6 7 8 9 10 11 + // # magic, opcode, keylen, extralen, datatype, status, bodylen, + // 12,3,4,5 16 + // opaque, cas + // RES_PKT_FMT=">BBHBBHIIQ" + + @Override + public void readFromBuffer(ByteBuffer b) throws IOException { + // First process headers if we haven't completed them yet + if(headerOffset < MIN_RECV_PACKET) { + int toRead=MIN_RECV_PACKET - headerOffset; + int available=b.remaining(); + toRead=Math.min(toRead, available); + getLogger().debug("Reading %d header bytes", toRead); + b.get(header, headerOffset, toRead); + headerOffset+=toRead; + + // We've completed reading the header. Prepare body read. + if(headerOffset == MIN_RECV_PACKET) { + int magic=header[0]; + assert magic == RES_MAGIC : "Invalid magic: " + magic; + responseCmd=header[1]; + assert cmd == -1 || responseCmd == cmd + : "Unexpected response command value"; + keyLen=decodeShort(header, 2); + // TODO: Examine extralen and datatype + errorCode=decodeShort(header, 6); + int bytesToRead=decodeInt(header, 8); + payload=new byte[bytesToRead]; + responseOpaque=decodeInt(header, 12); + responseCas=decodeLong(header, 16); + assert opaqueIsValid() : "Opaque is not valid"; + } + } + + // Now process the payload if we can. + if(headerOffset >= MIN_RECV_PACKET && payload == null) { + finishedPayload(EMPTY_BYTES); + } else if(payload != null) { + int toRead=payload.length - payloadOffset; + int available=b.remaining(); + toRead=Math.min(toRead, available); + getLogger().debug("Reading %d payload bytes", toRead); + b.get(payload, payloadOffset, toRead); + payloadOffset+=toRead; + + // Have we read it all? 
+ if(payloadOffset == payload.length) { + finishedPayload(payload); + } + } else { + // Haven't read enough to make up a payload. Must read more. + getLogger().debug("Only read %d of the %d needed to fill a header", + headerOffset, MIN_RECV_PACKET); + } + + } + + protected void finishedPayload(byte[] pl) throws IOException { + if(errorCode != 0) { + OperationStatus status=getStatusForErrorCode(errorCode, pl); + if(status == null) { + handleError(OperationErrorType.SERVER, new String(pl)); + } else { + getCallback().receivedStatus(status); + transitionState(OperationState.COMPLETE); + } + } else { + decodePayload(pl); + transitionState(OperationState.COMPLETE); + } + } + + /** + * Get the OperationStatus object for the given error code. + * + * @param errCode the error code + * @return the status to return, or null if this is an exceptional case + */ + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + return null; + } + + /** + * Decode the given payload for this command. + * + * @param pl the payload. + */ + protected void decodePayload(byte[] pl) { + assert pl.length == 0 : "Payload has bytes, but decode isn't overridden"; + getCallback().receivedStatus(STATUS_OK); + } + + /** + * Validate an opaque value from the header. + * This may be overridden from a subclass where the opaque isn't expected + * to always be the same as the request opaque. 
+	 */
+	protected boolean opaqueIsValid() {
+		if(responseOpaque != opaque) {
+			getLogger().warn("Expected opaque: %d, got opaque: %d\n",
+					responseOpaque, opaque);
+		}
+		return responseOpaque == opaque;
+	}
+
+	// Decode a big-endian 16-bit value starting at offset i.
+	static int decodeShort(byte[] data, int i) {
+		return (data[i] & 0xff) << 8
+			| (data[i+1] & 0xff);
+	}
+
+	// Decode a big-endian 32-bit value starting at offset i.
+	static int decodeInt(byte[] data, int i) {
+		return (data[i] & 0xff) << 24
+			| (data[i+1] & 0xff) << 16
+			| (data[i+2] & 0xff) << 8
+			| (data[i+3] & 0xff);
+	}
+
+	// Decode a big-endian unsigned 32-bit value into a non-negative long.
+	static long decodeUnsignedInt(byte[] data, int i) {
+		return ((long)(data[i] & 0xff) << 24)
+			| ((data[i+1] & 0xff) << 16)
+			| ((data[i+2] & 0xff) << 8)
+			| (data[i+3] & 0xff);
+	}
+
+	// Decode a big-endian 64-bit value (e.g. the CAS field) starting at
+	// offset i.  Each high byte must be widened to long *before* shifting:
+	// for an int left operand the shift count is taken mod 32 (JLS 15.19),
+	// so the previous all-int expression silently corrupted the upper
+	// four bytes of every decoded long.
+	static long decodeLong(byte[] data, int i) {
+		return ((long)(data[i  ] & 0xff) << 56)
+			| ((long)(data[i+1] & 0xff) << 48)
+			| ((long)(data[i+2] & 0xff) << 40)
+			| ((long)(data[i+3] & 0xff) << 32)
+			| ((long)(data[i+4] & 0xff) << 24)
+			| ((data[i+5] & 0xff) << 16)
+			| ((data[i+6] & 0xff) << 8)
+			| (data[i+7] & 0xff);
+	}
+
+	/**
+	 * Prepare a send buffer.
+	 *
+	 * @param key the key (for keyed ops)
+	 * @param cas the cas value
+	 * @param val the data payload
+	 * @param extraHeaders any additional headers that need to be sent
+	 */
+	protected void prepareBuffer(String key, long cas, byte[] val,
+			Object...
extraHeaders) { + int extraLen=0; + for(Object o : extraHeaders) { + if(o instanceof Integer) { + extraLen += 4; + } else if(o instanceof byte[]) { + extraLen += ((byte[])o).length; + } else if(o instanceof Long) { + extraLen += 8; + } else { + assert false : "Unhandled extra header type: " + o.getClass(); + } + } + final byte[] keyBytes=KeyUtil.getKeyBytes(key); + int bufSize=MIN_RECV_PACKET + keyBytes.length + val.length; + + // # magic, opcode, keylen, extralen, datatype, [reserved], + // bodylen, opaque, cas + // REQ_PKT_FMT=">BBHBBxxIIQ" + + // set up the initial header stuff + ByteBuffer bb=ByteBuffer.allocate(bufSize + extraLen); + assert bb.order() == ByteOrder.BIG_ENDIAN; + bb.put(REQ_MAGIC); + bb.put((byte)cmd); + bb.putShort((short)keyBytes.length); + bb.put((byte)extraLen); + bb.put((byte)0); // data type + bb.putShort((short)0); // reserved + bb.putInt(keyBytes.length + val.length + extraLen); + bb.putInt(opaque); + bb.putLong(cas); + + // Add the extra headers. + for(Object o : extraHeaders) { + if(o instanceof Integer) { + bb.putInt((Integer)o); + } else if(o instanceof byte[]) { + bb.put((byte[])o); + } else if(o instanceof Long) { + bb.putLong((Long)o); + } else { + assert false : "Unhandled extra header type: " + o.getClass(); + } + } + + // Add the normal stuff + bb.put(keyBytes); + bb.put(val); + + bb.flip(); + setBuffer(bb); + } + + /** + * Generate an opaque ID. 
+ */ + static int generateOpaque() { + int rv = seqNumber.incrementAndGet(); + while(rv < 0) { + seqNumber.compareAndSet(rv, 0); + rv=seqNumber.incrementAndGet(); + } + return rv; + } +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/OptimizedGetImpl.java b/src/main/java/net/spy/memcached/protocol/binary/OptimizedGetImpl.java new file mode 100644 index 000000000..362cd4bb2 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/OptimizedGetImpl.java @@ -0,0 +1,34 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collections; + +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.protocol.ProxyCallback; + +/** + * Optimized Get operation for folding a bunch of gets together. + */ +final class OptimizedGetImpl extends MultiGetOperationImpl { + + private final ProxyCallback pcb; + + /** + * Construct an optimized get starting with the given get operation. + */ + public OptimizedGetImpl(GetOperation firstGet) { + super(Collections.emptySet(), new ProxyCallback()); + pcb=(ProxyCallback)getCallback(); + addOperation(firstGet); + } + + /** + * Add a new GetOperation to get. 
+ */ + public void addOperation(GetOperation o) { + pcb.addCallbacks(o); + for(String k : o.getKeys()) { + addKey(k); + } + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/OptimizedSetImpl.java b/src/main/java/net/spy/memcached/protocol/binary/OptimizedSetImpl.java new file mode 100644 index 000000000..46f8a4410 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/OptimizedSetImpl.java @@ -0,0 +1,171 @@ +package net.spy.memcached.protocol.binary; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import net.spy.memcached.KeyUtil; +import net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StoreType; + +public class OptimizedSetImpl extends OperationImpl implements Operation { + + private static final OperationCallback NOOP_CALLBACK = new NoopCallback(); + + private final int terminalOpaque=generateOpaque(); + private final Map callbacks = + new HashMap(); + private final List ops = new ArrayList(); + + // If nothing else, this will be a NOOP. + private int byteCount = MIN_RECV_PACKET; + + /** + * Construct an optimized get starting with the given get operation. + */ + public OptimizedSetImpl(CASOperation firstStore) { + super(-1, -1, NOOP_CALLBACK); + addOperation(firstStore); + } + + public void addOperation(CASOperation op) { + ops.add(op); + + // Count the bytes required by this operation. 
+ Iterator is = op.getKeys().iterator(); + String k = is.next(); + int keylen = KeyUtil.getKeyBytes(k).length; + + byteCount += MIN_RECV_PACKET + StoreOperationImpl.EXTRA_LEN + + keylen + op.getBytes().length; + } + + public int size() { + return ops.size(); + } + + public int bytes() { + return byteCount; + } + + @Override + public void initialize() { + // Now create a buffer. + ByteBuffer bb=ByteBuffer.allocate(byteCount); + for(CASOperation so : ops) { + Iterator is = so.getKeys().iterator(); + String k = is.next(); + byte[] keyBytes = KeyUtil.getKeyBytes(k); + assert !is.hasNext(); + + int myOpaque = generateOpaque(); + callbacks.put(myOpaque, so.getCallback()); + byte[] data = so.getBytes(); + + // Custom header + bb.put(REQ_MAGIC); + bb.put((byte)cmdMap(so.getStoreType())); + bb.putShort((short)keyBytes.length); + bb.put((byte)StoreOperationImpl.EXTRA_LEN); // extralen + bb.put((byte)0); // data type + bb.putShort((short)0); // reserved + bb.putInt(keyBytes.length + data.length + + StoreOperationImpl.EXTRA_LEN); + bb.putInt(myOpaque); + bb.putLong(so.getCasValue()); // cas + // Extras + bb.putInt(so.getFlags()); + bb.putInt(so.getExpiration()); + // the actual key + bb.put(keyBytes); + // And the value + bb.put(data); + } + // Add the noop + bb.put(REQ_MAGIC); + bb.put((byte)NoopOperationImpl.CMD); + bb.putShort((short)0); + bb.put((byte)0); // extralen + bb.put((byte)0); // data type + bb.putShort((short)0); // reserved + bb.putInt(0); + bb.putInt(terminalOpaque); + bb.putLong(0); // cas + + bb.flip(); + setBuffer(bb); + } + + private static int cmdMap(StoreType t) { + int rv=-1; + switch(t) { + case set: rv=StoreOperationImpl.SETQ; break; + case add: rv=StoreOperationImpl.ADDQ; break; + case replace: rv=StoreOperationImpl.REPLACEQ; break; + } + // Check fall-through. 
+ assert rv != -1 : "Unhandled store type: " + t; + return rv; + } + + @Override + protected void finishedPayload(byte[] pl) throws IOException { + if(responseOpaque == terminalOpaque) { + for(OperationCallback cb : callbacks.values()) { + cb.receivedStatus(STATUS_OK); + cb.complete(); + } + transitionState(OperationState.COMPLETE); + } else { + OperationCallback cb = callbacks.remove(responseOpaque); + assert cb != null : "No callback for " + responseOpaque; + assert errorCode != 0 : "Got no error on a quiet mutation."; + OperationStatus status=getStatusForErrorCode(errorCode, pl); + assert status != null : "Got no status for a quiet mutation error"; + cb.receivedStatus(status); + cb.complete(); + } + resetInput(); + } + + @Override + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + OperationStatus rv=null; + switch(errCode) { + case ERR_EXISTS: + rv=EXISTS_STATUS; + break; + case ERR_NOT_FOUND: + rv=NOT_FOUND_STATUS; + break; + } + return rv; + } + + @Override + protected boolean opaqueIsValid() { + return responseOpaque == terminalOpaque + || callbacks.containsKey(responseOpaque); + } + + static class NoopCallback implements OperationCallback { + + public void complete() { + // noop + } + + public void receivedStatus(OperationStatus status) { + // noop + } + + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/SASLAuthOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/SASLAuthOperationImpl.java new file mode 100644 index 000000000..fd48e2690 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/SASLAuthOperationImpl.java @@ -0,0 +1,29 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.SASLAuthOperation; + +public class SASLAuthOperationImpl extends 
SASLBaseOperationImpl + implements SASLAuthOperation { + + private final static int CMD = 0x21; + + public SASLAuthOperationImpl(String[] m, String s, + Map p, CallbackHandler h, OperationCallback c) { + super(CMD, m, EMPTY_BYTES, s, p, h, c); + } + + @Override + protected byte[] buildResponse(SaslClient sc) throws SaslException { + return sc.hasInitialResponse() ? + sc.evaluateChallenge(challenge) + : EMPTY_BYTES; + + } +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/SASLBaseOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/SASLBaseOperationImpl.java new file mode 100644 index 000000000..3a1f03722 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/SASLBaseOperationImpl.java @@ -0,0 +1,73 @@ +package net.spy.memcached.protocol.binary; + +import java.io.IOException; +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.OperationStatus; + +public abstract class SASLBaseOperationImpl extends OperationImpl { + + private static final int SASL_CONTINUE=0x21; + + protected final String[] mech; + protected final byte[] challenge; + protected final String serverName; + protected final Map props; + protected final CallbackHandler cbh; + + public SASLBaseOperationImpl(int c, String[] m, byte[] ch, + String s, Map p, CallbackHandler h, + OperationCallback cb) { + super(c, generateOpaque(), cb); + mech = m; + challenge = ch; + serverName = s; + props = p; + cbh = h; + } + + @Override + public void initialize() { + try { + SaslClient sc=Sasl.createSaslClient(mech, null, + "memcached", serverName, props, cbh); + + byte[] response = buildResponse(sc); + String mechanism = sc.getMechanismName(); + + prepareBuffer(mechanism, 0, response); + } catch(SaslException e) { 
+ // XXX: Probably something saner can be done here. + throw new RuntimeException("Can't make SASL go.", e); + } + } + + protected abstract byte[] buildResponse(SaslClient sc) throws SaslException; + + @Override + protected void decodePayload(byte[] pl) { + getLogger().debug("Auth response: %s", new String(pl)); + } + + @Override + protected void finishedPayload(byte[] pl) throws IOException { + if (errorCode == SASL_CONTINUE) { + getCallback().receivedStatus(new OperationStatus(true, + new String(pl))); + transitionState(OperationState.COMPLETE); + } else if(errorCode == 0) { + getCallback().receivedStatus(new OperationStatus(true, "")); + transitionState(OperationState.COMPLETE); + } else { + super.finishedPayload(pl); + } + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/SASLMechsOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/SASLMechsOperationImpl.java new file mode 100644 index 000000000..67a9b9db3 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/SASLMechsOperationImpl.java @@ -0,0 +1,27 @@ +package net.spy.memcached.protocol.binary; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.SASLMechsOperation; + +class SASLMechsOperationImpl extends OperationImpl implements + SASLMechsOperation { + + private static final int CMD = 0x20; + + public SASLMechsOperationImpl(OperationCallback cb) { + super(CMD, generateOpaque(), cb); + } + + @Override + public void initialize() { + prepareBuffer("", 0, EMPTY_BYTES); + } + + @Override + protected void decodePayload(byte[] pl) { + getCallback().receivedStatus( + new OperationStatus(true, new String(pl))); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/SASLStepOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/SASLStepOperationImpl.java new file mode 100644 index 000000000..030ada910 --- /dev/null +++ 
b/src/main/java/net/spy/memcached/protocol/binary/SASLStepOperationImpl.java @@ -0,0 +1,27 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Map; + +import javax.security.auth.callback.CallbackHandler; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.SASLStepOperation; + +public class SASLStepOperationImpl extends SASLBaseOperationImpl + implements SASLStepOperation { + + private final static int CMD = 0x22; + + public SASLStepOperationImpl(String[] m, byte[] ch, String s, + Map p, CallbackHandler h, OperationCallback c) { + super(CMD, m, ch, s, p, h, c); + } + + @Override + protected byte[] buildResponse(SaslClient sc) throws SaslException { + return sc.evaluateChallenge(challenge); + + } +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/StatsOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/StatsOperationImpl.java new file mode 100644 index 000000000..dc846b63a --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/StatsOperationImpl.java @@ -0,0 +1,41 @@ +package net.spy.memcached.protocol.binary; + +import java.io.IOException; + +import net.spy.memcached.ops.OperationState; +import net.spy.memcached.ops.StatsOperation; + +public class StatsOperationImpl extends OperationImpl + implements StatsOperation { + + private static final int CMD = 0x10; + private final String key; + + public StatsOperationImpl(String arg, StatsOperation.Callback c) { + super(CMD, generateOpaque(), c); + key=(arg == null) ? 
"" : arg; + } + + @Override + public void initialize() { + prepareBuffer(key, 0, EMPTY_BYTES); + } + + @Override + protected void finishedPayload(byte[] pl) throws IOException { + if(keyLen > 0) { + final byte[] keyBytes=new byte[keyLen]; + final byte[] data=new byte[pl.length - keyLen]; + System.arraycopy(pl, 0, keyBytes, 0, keyLen); + System.arraycopy(pl, keyLen, data, 0, pl.length-keyLen); + Callback cb=(Callback)getCallback(); + cb.gotStat(new String(keyBytes, "UTF-8"), + new String(data, "UTF-8")); + } else { + getCallback().receivedStatus(STATUS_OK); + transitionState(OperationState.COMPLETE); + } + resetInput(); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/StoreOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/StoreOperationImpl.java new file mode 100644 index 000000000..b840d7089 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/StoreOperationImpl.java @@ -0,0 +1,103 @@ +package net.spy.memcached.protocol.binary; + +import java.util.Collection; +import java.util.Collections; + +import net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; + +class StoreOperationImpl extends OperationImpl + implements StoreOperation, CASOperation { + + private static final int SET=1; + private static final int ADD=2; + private static final int REPLACE=3; + + static final int SETQ=0x11; + static final int ADDQ=0x12; + static final int REPLACEQ=0x13; + + // 4-byte flags, 4-byte expiration + static final int EXTRA_LEN = 8; + + private final String key; + private final StoreType storeType; + private final int flags; + private final int exp; + private final long cas; + private final byte[] data; + + private static int cmdMap(StoreType t) { + int rv=-1; + switch(t) { + case set: rv=SET; break; + case add: rv=ADD; break; + case replace: rv=REPLACE; break; + } 
+ // Check fall-through. + assert rv != -1 : "Unhandled store type: " + t; + return rv; + } + + public StoreOperationImpl(StoreType t, String k, int f, int e, + byte[] d, long c, OperationCallback cb) { + super(cmdMap(t), generateOpaque(), cb); + key=k; + flags=f; + exp=e; + data=d; + cas=c; + storeType=t; + } + + @Override + public void initialize() { + prepareBuffer(key, cas, data, flags, exp); + } + + @Override + protected OperationStatus getStatusForErrorCode(int errCode, byte[] errPl) { + OperationStatus rv=null; + switch(errCode) { + case ERR_EXISTS: + rv=EXISTS_STATUS; + break; + case ERR_NOT_FOUND: + rv=NOT_FOUND_STATUS; + break; + } + return rv; + } + + public Collection getKeys() { + return Collections.singleton(key); + } + + public byte[] getBytes() { + return data; + } + + public long getCasValue() { + return cas; + } + + public int getExpiration() { + return exp; + } + + public int getFlags() { + return flags; + } + + public byte[] getData() { + return data; + } + + public StoreType getStoreType() { + return storeType; + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/VersionOperationImpl.java b/src/main/java/net/spy/memcached/protocol/binary/VersionOperationImpl.java new file mode 100644 index 000000000..8208baf99 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/VersionOperationImpl.java @@ -0,0 +1,25 @@ +package net.spy.memcached.protocol.binary; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.VersionOperation; + +class VersionOperationImpl extends OperationImpl implements VersionOperation { + + private static final int CMD = 11; + + public VersionOperationImpl(OperationCallback cb) { + super(CMD, generateOpaque(), cb); + } + + @Override + public void initialize() { + prepareBuffer("", 0, EMPTY_BYTES); + } + + @Override + protected void decodePayload(byte[] pl) { + getCallback().receivedStatus(new OperationStatus(true, new 
String(pl))); + } + +} diff --git a/src/main/java/net/spy/memcached/protocol/binary/package.html b/src/main/java/net/spy/memcached/protocol/binary/package.html new file mode 100644 index 000000000..07245c93b --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/binary/package.html @@ -0,0 +1,14 @@ + + + + + + Low-level operations for the memcached binary protocol + + + +

Low-level operations for the memcached binary protocol

+ + + diff --git a/src/main/java/net/spy/memcached/protocol/package.html b/src/main/java/net/spy/memcached/protocol/package.html new file mode 100644 index 000000000..dc03ed967 --- /dev/null +++ b/src/main/java/net/spy/memcached/protocol/package.html @@ -0,0 +1,14 @@ + + + + + + Base classes for protocol abstractions. + + + +

Base classes for protocol abstractions.

+ + + diff --git a/src/main/java/net/spy/memcached/transcoders/BaseSerializingTranscoder.java b/src/main/java/net/spy/memcached/transcoders/BaseSerializingTranscoder.java new file mode 100644 index 000000000..6ebb68e76 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/BaseSerializingTranscoder.java @@ -0,0 +1,212 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.transcoders; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.UnsupportedEncodingException; +import java.util.zip.GZIPInputStream; +import java.util.zip.GZIPOutputStream; + +import net.spy.memcached.CachedData; +import net.spy.memcached.compat.CloseUtil; +import net.spy.memcached.compat.SpyObject; + +/** + * Base class for any transcoders that may want to work with serialized or + * compressed data. + */ +public abstract class BaseSerializingTranscoder extends SpyObject { + + /** + * Default compression threshold value. 
+ */ + public static final int DEFAULT_COMPRESSION_THRESHOLD = 16384; + + private static final String DEFAULT_CHARSET = "UTF-8"; + + protected int compressionThreshold=DEFAULT_COMPRESSION_THRESHOLD; + protected String charset=DEFAULT_CHARSET; + + private final int maxSize; + + /** + * Initialize a serializing transcoder with the given maximum data size. + */ + public BaseSerializingTranscoder(int max) { + super(); + maxSize = max; + } + + public boolean asyncDecode(CachedData d) { + return false; + } + + /** + * Set the compression threshold to the given number of bytes. This + * transcoder will attempt to compress any data being stored that's larger + * than this. + * + * @param to the number of bytes + */ + public void setCompressionThreshold(int to) { + compressionThreshold=to; + } + + /** + * Set the character set for string value transcoding (defaults to UTF-8). + */ + public void setCharset(String to) { + // Validate the character set. + try { + new String(new byte[97], to); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + charset=to; + } + + /** + * Get the bytes representing the given serialized object. + */ + protected byte[] serialize(Object o) { + if(o == null) { + throw new NullPointerException("Can't serialize null"); + } + byte[] rv=null; + try { + ByteArrayOutputStream bos=new ByteArrayOutputStream(); + ObjectOutputStream os=new ObjectOutputStream(bos); + os.writeObject(o); + os.close(); + bos.close(); + rv=bos.toByteArray(); + } catch(IOException e) { + throw new IllegalArgumentException("Non-serializable object, cause=" + e.getMessage(), e); + } + return rv; + } + + /** + * Get the object represented by the given serialized bytes. 
+ */ + protected Object deserialize(byte[] in) { + Object rv=null; + try { + if(in != null) { + ByteArrayInputStream bis=new ByteArrayInputStream(in); + ObjectInputStream is=new ObjectInputStream(bis); + rv=is.readObject(); + is.close(); + bis.close(); + } + } catch(IOException e) { + getLogger().warn("Caught IOException decoding %d bytes of data", + in == null ? 0 : in.length, e); + } catch (ClassNotFoundException e) { + getLogger().warn("Caught CNFE decoding %d bytes of data", + in == null ? 0 : in.length, e); + } + return rv; + } + + /** + * Compress the given array of bytes. + */ + protected byte[] compress(byte[] in) { + if(in == null) { + throw new NullPointerException("Can't compress null"); + } + ByteArrayOutputStream bos=new ByteArrayOutputStream(); + GZIPOutputStream gz=null; + try { + gz = new GZIPOutputStream(bos); + gz.write(in); + } catch (IOException e) { + throw new RuntimeException("IO exception compressing data", e); + } finally { + CloseUtil.close(gz); + CloseUtil.close(bos); + } + byte[] rv=bos.toByteArray(); + getLogger().debug("Compressed %d bytes to %d", in.length, rv.length); + return rv; + } + + /** + * Decompress the given array of bytes. + * + * @return null if the bytes cannot be decompressed + */ + protected byte[] decompress(byte[] in) { + ByteArrayOutputStream bos=null; + if(in != null) { + ByteArrayInputStream bis=new ByteArrayInputStream(in); + bos=new ByteArrayOutputStream(); + GZIPInputStream gis; + try { + gis = new GZIPInputStream(bis); + + byte[] buf=new byte[8192]; + int r=-1; + while((r=gis.read(buf)) > 0) { + bos.write(buf, 0, r); + } + } catch (IOException e) { + getLogger().warn("Failed to decompress data", e); + bos = null; + } + } + return bos == null ? null : bos.toByteArray(); + } + + /** + * Decode the string with the current character set. 
+ */ + protected String decodeString(byte[] data) { + String rv=null; + try { + if(data != null) { + rv=new String(data, charset); + } + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + return rv; + } + + /** + * Encode a string into the current character set. + */ + protected byte[] encodeString(String in) { + byte[] rv=null; + try { + rv=in.getBytes(charset); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + return rv; + } + + public int getMaxSize() { + return maxSize; + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/CollectionTranscoder.java b/src/main/java/net/spy/memcached/transcoders/CollectionTranscoder.java new file mode 100644 index 000000000..1ce4e1f43 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/CollectionTranscoder.java @@ -0,0 +1,161 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.transcoders; + +import java.util.Date; + +import net.spy.memcached.CachedData; +import net.spy.memcached.collection.ElementValueType; + +/** + * Transcoder that serialized and compresses objects for collection elements. + */ +public class CollectionTranscoder extends SerializingTranscoder implements + Transcoder { + + /** + * Maximum element size allowed by memcached collections. 
+ */ + public static final int MAX_SIZE = 4 * 1024; + + /** + * Get a serializing transcoder with the default max data size. + */ + public CollectionTranscoder() { + this(MAX_SIZE); + } + + /** + * Get a serializing transcoder that specifies the max data size. + */ + public CollectionTranscoder(int max) { + super(max); + } + + public static int examineFlags(ElementValueType type) { + int flags = 0; + if (type == ElementValueType.STRING) { + } else if (type == ElementValueType.LONG) { + flags |= SPECIAL_LONG; + } else if (type == ElementValueType.INTEGER) { + flags |= SPECIAL_INT; + } else if (type == ElementValueType.BOOLEAN) { + flags |= SPECIAL_BOOLEAN; + } else if (type == ElementValueType.DATE) { + flags |= SPECIAL_DATE; + } else if (type == ElementValueType.BYTE) { + flags |= SPECIAL_BYTE; + } else if (type == ElementValueType.FLOAT) { + flags |= SPECIAL_FLOAT; + } else if (type == ElementValueType.DOUBLE) { + flags |= SPECIAL_DOUBLE; + } else if (type == ElementValueType.BYTEARRAY) { + flags |= SPECIAL_BYTEARRAY; + } else { + flags |= SERIALIZED; + } + return flags; + } + + /* + * (non-Javadoc) + * + * @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData) + */ + public Object decode(CachedData d) { + byte[] data = d.getData(); + Object rv = null; + int flags = d.getFlags() & SPECIAL_MASK; + if ((d.getFlags() & SERIALIZED) != 0 && data != null) { + rv = deserialize(data); + } else if (flags != 0 && data != null) { + switch (flags) { + case SPECIAL_BOOLEAN: + rv = Boolean.valueOf(tu.decodeBoolean(data)); + break; + case SPECIAL_INT: + rv = new Integer(tu.decodeInt(data)); + break; + case SPECIAL_LONG: + rv = new Long(tu.decodeLong(data)); + break; + case SPECIAL_DATE: + rv = new Date(tu.decodeLong(data)); + break; + case SPECIAL_BYTE: + rv = new Byte(tu.decodeByte(data)); + break; + case SPECIAL_FLOAT: + rv = new Float(Float.intBitsToFloat(tu.decodeInt(data))); + break; + case SPECIAL_DOUBLE: + rv = new 
Double(Double.longBitsToDouble(tu.decodeLong(data))); + break; + case SPECIAL_BYTEARRAY: + rv = data; + break; + default: + getLogger().warn("Undecodeable with flags %x", flags); + } + } else { + rv = decodeString(data); + } + return rv; + } + + /* + * (non-Javadoc) + * + * @see net.spy.memcached.Transcoder#encode(java.lang.Object) + */ + public CachedData encode(Object o) { + byte[] b = null; + int flags = 0; + if (o instanceof String) { + b = encodeString((String) o); + } else if (o instanceof Long) { + b = tu.encodeLong((Long) o); + flags |= SPECIAL_LONG; + } else if (o instanceof Integer) { + b = tu.encodeInt((Integer) o); + flags |= SPECIAL_INT; + } else if (o instanceof Boolean) { + b = tu.encodeBoolean((Boolean) o); + flags |= SPECIAL_BOOLEAN; + } else if (o instanceof Date) { + b = tu.encodeLong(((Date) o).getTime()); + flags |= SPECIAL_DATE; + } else if (o instanceof Byte) { + b = tu.encodeByte((Byte) o); + flags |= SPECIAL_BYTE; + } else if (o instanceof Float) { + b = tu.encodeInt(Float.floatToRawIntBits((Float) o)); + flags |= SPECIAL_FLOAT; + } else if (o instanceof Double) { + b = tu.encodeLong(Double.doubleToRawLongBits((Double) o)); + flags |= SPECIAL_DOUBLE; + } else if (o instanceof byte[]) { + b = (byte[]) o; + flags |= SPECIAL_BYTEARRAY; + } else { + b = serialize(o); + flags |= SERIALIZED; + } + assert b != null; + return new CachedData(flags, b, getMaxSize()); + } +} diff --git a/src/main/java/net/spy/memcached/transcoders/InspectObjectSizeTranscoder.java b/src/main/java/net/spy/memcached/transcoders/InspectObjectSizeTranscoder.java new file mode 100644 index 000000000..35827fa43 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/InspectObjectSizeTranscoder.java @@ -0,0 +1,41 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.transcoders; + +import net.spy.memcached.CachedData; + +public class InspectObjectSizeTranscoder extends SerializingTranscoder { + + public static interface LoggingObjectSize { + void histogram(int size); + } + + private final LoggingObjectSize objSizeLogger; + + public InspectObjectSizeTranscoder(LoggingObjectSize objSizeLogger) { + this.objSizeLogger = objSizeLogger; + } + + @Override + public CachedData encode(Object o) { + CachedData encoded = super.encode(o); + + objSizeLogger.histogram(encoded.getData().length); + + return encoded; + } +} diff --git a/src/main/java/net/spy/memcached/transcoders/IntegerTranscoder.java b/src/main/java/net/spy/memcached/transcoders/IntegerTranscoder.java new file mode 100644 index 000000000..27dcc4483 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/IntegerTranscoder.java @@ -0,0 +1,38 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import net.spy.memcached.CachedData; +import net.spy.memcached.compat.SpyObject; + +/** + * Transcoder that serializes and unserializes longs. 
+ */ +public final class IntegerTranscoder extends SpyObject + implements Transcoder { + + private static final int flags = SerializingTranscoder.SPECIAL_INT; + + private final TranscoderUtils tu=new TranscoderUtils(true); + + public boolean asyncDecode(CachedData d) { + return false; + } + + public CachedData encode(java.lang.Integer l) { + return new CachedData(flags, tu.encodeInt(l), getMaxSize()); + } + + public Integer decode(CachedData d) { + if (flags == d.getFlags()) { + return tu.decodeInt(d.getData()); + } else { + return null; + } + } + + public int getMaxSize() { + return CachedData.MAX_SIZE; + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/LongTranscoder.java b/src/main/java/net/spy/memcached/transcoders/LongTranscoder.java new file mode 100644 index 000000000..f84b622ac --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/LongTranscoder.java @@ -0,0 +1,40 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import net.spy.memcached.CachedData; +import net.spy.memcached.compat.SpyObject; + +/** + * Transcoder that serializes and unserializes longs. 
+ */ +public final class LongTranscoder extends SpyObject + implements Transcoder { + + private static final int flags = SerializingTranscoder.SPECIAL_LONG; + + private final TranscoderUtils tu=new TranscoderUtils(true); + + public boolean asyncDecode(CachedData d) { + return false; + } + + public CachedData encode(java.lang.Long l) { + return new CachedData(flags, tu.encodeLong(l), getMaxSize()); + } + + public Long decode(CachedData d) { + if (flags == d.getFlags()) { + return tu.decodeLong(d.getData()); + } else { + getLogger().error("Unexpected flags for long: " + + d.getFlags() + " wanted " + flags); + return null; + } + } + + public int getMaxSize() { + return CachedData.MAX_SIZE; + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/SerializingTranscoder.java b/src/main/java/net/spy/memcached/transcoders/SerializingTranscoder.java new file mode 100644 index 000000000..a29ee130f --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/SerializingTranscoder.java @@ -0,0 +1,171 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import java.util.Date; + +import net.spy.memcached.CachedData; + +/** + * Transcoder that serializes and compresses objects. 
+ */ +public class SerializingTranscoder extends BaseSerializingTranscoder + implements Transcoder { + + // General flags + static final int SERIALIZED=1; + static final int COMPRESSED=2; + + // Special flags for specially handled types. + protected static final int SPECIAL_MASK=0xff00; + static final int SPECIAL_BOOLEAN=(1<<8); + static final int SPECIAL_INT=(2<<8); + static final int SPECIAL_LONG=(3<<8); + static final int SPECIAL_DATE=(4<<8); + static final int SPECIAL_BYTE=(5<<8); + static final int SPECIAL_FLOAT=(6<<8); + static final int SPECIAL_DOUBLE=(7<<8); + static final int SPECIAL_BYTEARRAY=(8<<8); + + protected final TranscoderUtils tu=new TranscoderUtils(true); + + /** + * Get a serializing transcoder with the default max data size. + */ + public SerializingTranscoder() { + this(CachedData.MAX_SIZE); + } + + /** + * Get a serializing transcoder that specifies the max data size. + */ + public SerializingTranscoder(int max) { + super(max); + } + + @Override + public boolean asyncDecode(CachedData d) { + if((d.getFlags() & COMPRESSED) != 0 + || (d.getFlags() & SERIALIZED) != 0 ) { + return true; + } + return super.asyncDecode(d); + } + + /* (non-Javadoc) + * @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData) + */ + public Object decode(CachedData d) { + byte[] data=d.getData(); + Object rv=null; + if((d.getFlags() & COMPRESSED) != 0) { + data=decompress(d.getData()); + } + int flags=d.getFlags() & SPECIAL_MASK; + if((d.getFlags() & SERIALIZED) != 0 && data != null) { + rv=deserialize(data); + } else if(flags != 0 && data != null) { + switch(flags) { + case SPECIAL_BOOLEAN: + rv=Boolean.valueOf(tu.decodeBoolean(data)); + break; + case SPECIAL_INT: + rv=new Integer(tu.decodeInt(data)); + break; + case SPECIAL_LONG: + rv=new Long(tu.decodeLong(data)); + break; + case SPECIAL_DATE: + rv=new Date(tu.decodeLong(data)); + break; + case SPECIAL_BYTE: + rv=new Byte(tu.decodeByte(data)); + break; + case SPECIAL_FLOAT: + rv=new 
Float(Float.intBitsToFloat(tu.decodeInt(data))); + break; + case SPECIAL_DOUBLE: + rv=new Double(Double.longBitsToDouble(tu.decodeLong(data))); + break; + case SPECIAL_BYTEARRAY: + rv=data; + break; + default: + getLogger().warn("Undecodeable with flags %x", flags); + } + } else { + rv=decodeString(data); + } + return rv; + } + + /* (non-Javadoc) + * @see net.spy.memcached.Transcoder#encode(java.lang.Object) + */ + public CachedData encode(Object o) { + byte[] b=null; + int flags=0; + if(o instanceof String) { + b=encodeString((String)o); + } else if(o instanceof Long) { + b=tu.encodeLong((Long)o); + flags |= SPECIAL_LONG; + } else if(o instanceof Integer) { + b=tu.encodeInt((Integer)o); + flags |= SPECIAL_INT; + } else if(o instanceof Boolean) { + b=tu.encodeBoolean((Boolean)o); + flags |= SPECIAL_BOOLEAN; + } else if(o instanceof Date) { + b=tu.encodeLong(((Date)o).getTime()); + flags |= SPECIAL_DATE; + } else if(o instanceof Byte) { + b=tu.encodeByte((Byte)o); + flags |= SPECIAL_BYTE; + } else if(o instanceof Float) { + b=tu.encodeInt(Float.floatToRawIntBits((Float)o)); + flags |= SPECIAL_FLOAT; + } else if(o instanceof Double) { + b=tu.encodeLong(Double.doubleToRawLongBits((Double)o)); + flags |= SPECIAL_DOUBLE; + } else if(o instanceof byte[]) { + b=(byte[])o; + flags |= SPECIAL_BYTEARRAY; + } else { + b=serialize(o); + flags |= SERIALIZED; + } + assert b != null; + if(b.length > compressionThreshold) { + byte[] compressed=compress(b); + if(compressed.length < b.length) { + getLogger().debug("Compressed %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + b=compressed; + flags |= COMPRESSED; + } else { + getLogger().info( + "Compression increased the size of %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + } + } + return new CachedData(flags, b, getMaxSize()); + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/TranscodeService.java 
b/src/main/java/net/spy/memcached/transcoders/TranscodeService.java new file mode 100644 index 000000000..733180eea --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/TranscodeService.java @@ -0,0 +1,94 @@ +package net.spy.memcached.transcoders; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.FutureTask; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import net.spy.memcached.CachedData; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.internal.BasicThreadFactory; + +/** + * Asynchronous transcoder. + */ +public class TranscodeService extends SpyObject { + + private final ThreadPoolExecutor pool; + + public TranscodeService(boolean daemon) { + pool = new ThreadPoolExecutor(1, 10, 60L, + TimeUnit.MILLISECONDS, new ArrayBlockingQueue(100), + new BasicThreadFactory("transcoder", daemon), + new ThreadPoolExecutor.DiscardPolicy()); + } + + /** + * Perform a decode. + */ + public Future decode(final Transcoder tc, + final CachedData cachedData) { + + assert !pool.isShutdown() : "Pool has already shut down."; + + TranscodeService.Task task = new TranscodeService.Task( + new Callable() { + public T call() { + return tc.decode(cachedData); + } + }); + + if (tc.asyncDecode(cachedData)) { + this.pool.execute(task); + } + return task; + } + + /** + * Shut down the pool. + */ + public void shutdown() { + pool.shutdown(); + } + + /** + * Ask whether this service has been shut down. 
+ */ + public boolean isShutdown() { + return pool.isShutdown(); + } + + private static class Task extends FutureTask { + private final AtomicBoolean isRunning = new AtomicBoolean(false); + + public Task(Callable callable) { + super(callable); + } + + @Override + public T get() throws InterruptedException, ExecutionException { + this.run(); + return super.get(); + } + + @Override + public T get(long timeout, TimeUnit unit) throws InterruptedException, + ExecutionException, TimeoutException { + this.run(); + return super.get(timeout, unit); + } + + @Override + public void run() { + if (this.isRunning.compareAndSet(false, true)) { + super.run(); + } + } + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/Transcoder.java b/src/main/java/net/spy/memcached/transcoders/Transcoder.java new file mode 100644 index 000000000..b095d9344 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/Transcoder.java @@ -0,0 +1,39 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import net.spy.memcached.CachedData; + +/** + * Transcoder is an interface for classes that convert between byte arrays and + * objects for storage in the cache. + */ +public interface Transcoder { + + /** + * Should the transcoder be run asyncronously. + * @return True if the CachedData should be decoded Asyncronously + */ + boolean asyncDecode(CachedData d); + + /** + * Encode the given object for storage. + * + * @param o the object + * @return the CachedData representing what should be sent + */ + CachedData encode(T o); + + /** + * Decode the cached object into the object it represents. + * + * @param d the data + * @return the return value + */ + T decode(CachedData d); + + /** + * Get the maximum size of objects handled by this transcoder. 
/**
 * Utility class for transcoding Java numeric types to and from big-endian
 * byte arrays.
 */
public final class TranscoderUtils {

    private final boolean packZeros;

    /**
     * Get an instance of TranscoderUtils.
     *
     * @param pack if true, remove all zero bytes from the MSB of the
     *             packed number
     */
    public TranscoderUtils(boolean pack) {
        super();
        packZeros = pack;
    }

    /**
     * Encode a number big-endian into maxBytes bytes, optionally dropping
     * leading zero bytes.  (Loop bounds reconstructed — the extracted
     * source was garbled here.)
     *
     * @param l the value to encode
     * @param maxBytes width of the unpacked encoding in bytes
     * @return the encoded bytes; when zero-packing is enabled the array
     *         may be shorter than maxBytes, and is empty for zero
     */
    public byte[] encodeNum(long l, int maxBytes) {
        byte[] rv = new byte[maxBytes];
        for (int i = 0; i < rv.length; i++) {
            int pos = rv.length - i - 1;
            rv[pos] = (byte) ((l >> (8 * i)) & 0xff);
        }
        if (packZeros) {
            // Find the first significant (non-zero) byte.
            int firstNon0 = 0;
            for (; firstNon0 < rv.length && rv[firstNon0] == 0; firstNon0++) {
                // empty
            }
            if (firstNon0 > 0) {
                byte[] tmp = new byte[rv.length - firstNon0];
                System.arraycopy(rv, firstNon0, tmp, 0, rv.length - firstNon0);
                rv = tmp;
            }
        }
        return rv;
    }

    public byte[] encodeLong(long l) {
        return encodeNum(l, 8);
    }

    /** Decode a big-endian (possibly zero-packed) long. */
    public long decodeLong(byte[] b) {
        long rv = 0;
        for (byte i : b) {
            rv = (rv << 8) | (i < 0 ? 256 + i : i);
        }
        return rv;
    }

    public byte[] encodeInt(int in) {
        return encodeNum(in, 4);
    }

    public int decodeInt(byte[] in) {
        assert in.length <= 4
            : "Too long to be an int (" + in.length + ") bytes";
        return (int) decodeLong(in);
    }

    public byte[] encodeByte(byte in) {
        return new byte[]{in};
    }

    /** Decode a byte; an empty (fully packed) array decodes to zero. */
    public byte decodeByte(byte[] in) {
        assert in.length <= 1 : "Too long for a byte";
        byte rv = 0;
        if (in.length == 1) {
            rv = in[0];
        }
        return rv;
    }

    /** Encode a boolean as the ASCII character '1' or '0'. */
    public byte[] encodeBoolean(boolean b) {
        byte[] rv = new byte[1];
        rv[0] = (byte) (b ? '1' : '0');
        return rv;
    }

    public boolean decodeBoolean(byte[] in) {
        assert in.length == 1 : "Wrong length for a boolean";
        return in[0] == '1';
    }
}
a/src/main/java/net/spy/memcached/transcoders/WhalinTranscoder.java b/src/main/java/net/spy/memcached/transcoders/WhalinTranscoder.java new file mode 100644 index 000000000..61ab2d342 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/WhalinTranscoder.java @@ -0,0 +1,190 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.transcoders; + +import java.util.Date; + +import net.spy.memcached.CachedData; + +/** + * Transcoder that provides compatibility with Greg Whalin's memcached client. 
+ */ +public class WhalinTranscoder extends BaseSerializingTranscoder + implements Transcoder { + + static final int SPECIAL_BYTE = 1; + static final int SPECIAL_BOOLEAN = 8192; + static final int SPECIAL_INT = 4; + static final int SPECIAL_LONG = 16384; + static final int SPECIAL_CHARACTER = 16; + static final int SPECIAL_STRING = 32; + static final int SPECIAL_STRINGBUFFER = 64; + static final int SPECIAL_FLOAT = 128; + static final int SPECIAL_SHORT = 256; + static final int SPECIAL_DOUBLE = 512; + static final int SPECIAL_DATE = 1024; + static final int SPECIAL_STRINGBUILDER = 2048; + static final int SPECIAL_BYTEARRAY = 4096; + static final int COMPRESSED = 2; + static final int SERIALIZED = 8; + + private final TranscoderUtils tu=new TranscoderUtils(false); + + public WhalinTranscoder() { + super(CachedData.MAX_SIZE); + } + + /* (non-Javadoc) + * @see net.spy.memcached.Transcoder#decode(net.spy.memcached.CachedData) + */ + public Object decode(CachedData d) { + byte[] data=d.getData(); + Object rv=null; + if((d.getFlags() & COMPRESSED) != 0) { + data=decompress(d.getData()); + } + if((d.getFlags() & SERIALIZED) != 0) { + rv=deserialize(data); + } else { + int f=d.getFlags() & ~COMPRESSED; + switch(f) { + case SPECIAL_BOOLEAN: + rv=Boolean.valueOf(this.decodeBoolean(data)); + break; + case SPECIAL_INT: + rv=new Integer(tu.decodeInt(data)); + break; + case SPECIAL_SHORT: + rv=new Short((short)tu.decodeInt(data)); + break; + case SPECIAL_LONG: + rv=new Long(tu.decodeLong(data)); + break; + case SPECIAL_DATE: + rv=new Date(tu.decodeLong(data)); + break; + case SPECIAL_BYTE: + rv=new Byte(tu.decodeByte(data)); + break; + case SPECIAL_FLOAT: + rv=new Float(Float.intBitsToFloat(tu.decodeInt(data))); + break; + case SPECIAL_DOUBLE: + rv=new Double(Double.longBitsToDouble(tu.decodeLong(data))); + break; + case SPECIAL_BYTEARRAY: + rv=data; + break; + case SPECIAL_STRING: + rv = decodeString(data); + break; + case SPECIAL_STRINGBUFFER: + rv=new 
StringBuffer(decodeString(data)); + break; + case SPECIAL_STRINGBUILDER: + rv=new StringBuilder(decodeString(data)); + break; + case SPECIAL_CHARACTER: + rv = decodeCharacter(data); + break; + default: + getLogger().warn("Cannot handle data with flags %x", f); + } + } + return rv; + } + + public CachedData encode(Object o) { + byte[] b=null; + int flags=0; + if(o instanceof String) { + b=encodeString((String)o); + flags |= SPECIAL_STRING; + } else if(o instanceof StringBuffer) { + flags |= SPECIAL_STRINGBUFFER; + b=encodeString(String.valueOf(o)); + } else if(o instanceof StringBuilder) { + flags |= SPECIAL_STRINGBUILDER; + b=encodeString(String.valueOf(o)); + } else if(o instanceof Long) { + b=tu.encodeLong((Long)o); + flags |= SPECIAL_LONG; + } else if(o instanceof Integer) { + b=tu.encodeInt((Integer)o); + flags |= SPECIAL_INT; + } else if(o instanceof Short) { + b=tu.encodeInt((Short)o); + flags |= SPECIAL_SHORT; + } else if(o instanceof Boolean) { + b=this.encodeBoolean((Boolean)o); + flags |= SPECIAL_BOOLEAN; + } else if(o instanceof Date) { + b=tu.encodeLong(((Date)o).getTime()); + flags |= SPECIAL_DATE; + } else if(o instanceof Byte) { + b=tu.encodeByte((Byte)o); + flags |= SPECIAL_BYTE; + } else if(o instanceof Float) { + b=tu.encodeInt(Float.floatToIntBits((Float)o)); + flags |= SPECIAL_FLOAT; + } else if(o instanceof Double) { + b=tu.encodeLong(Double.doubleToLongBits((Double)o)); + flags |= SPECIAL_DOUBLE; + } else if(o instanceof byte[]) { + b=(byte[])o; + flags |= SPECIAL_BYTEARRAY; + } else if (o instanceof Character) { + b = tu.encodeInt((Character) o); + flags |= SPECIAL_CHARACTER; + } else { + b=serialize(o); + flags |= SERIALIZED; + } + assert b != null; + if(b.length > compressionThreshold) { + byte[] compressed=compress(b); + if(compressed.length < b.length) { + getLogger().debug("Compressed %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + b=compressed; + flags |= COMPRESSED; + } else { + getLogger().info( + 
"Compression increased the size of %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + } + } + return new CachedData(flags, b, getMaxSize()); + } + + protected Character decodeCharacter(byte[] b){ + return Character.valueOf((char)tu.decodeInt(b)); + } + + public byte[] encodeBoolean(boolean b){ + byte[] rv = new byte[1]; + rv[0] = (byte) (b ? 1 : 0); + return rv; + } + + public boolean decodeBoolean(byte[] in) { + assert in.length == 1 : "Wrong length for a boolean"; + return in[0] == 1; + } + + +} diff --git a/src/main/java/net/spy/memcached/transcoders/WhalinV1Transcoder.java b/src/main/java/net/spy/memcached/transcoders/WhalinV1Transcoder.java new file mode 100644 index 000000000..c18204adc --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/WhalinV1Transcoder.java @@ -0,0 +1,284 @@ +package net.spy.memcached.transcoders; + +import java.io.UnsupportedEncodingException; +import java.util.Date; + +import net.spy.memcached.CachedData; + +/** + * Handles old whalin (tested with v1.6) encoding: data type is in the first + * byte of the value. 
+ * + * @author bpartensky + * @since Oct 16, 2008 + */ +public class WhalinV1Transcoder extends BaseSerializingTranscoder + implements Transcoder { + + public static final int SPECIAL_BYTE = 1; + public static final int SPECIAL_BOOLEAN = 2; + public static final int SPECIAL_INTEGER = 3; + public static final int SPECIAL_LONG = 4; + public static final int SPECIAL_CHARACTER = 5; + public static final int SPECIAL_STRING = 6; + public static final int SPECIAL_STRINGBUFFER = 7; + public static final int SPECIAL_FLOAT = 8; + public static final int SPECIAL_SHORT = 9; + public static final int SPECIAL_DOUBLE = 10; + public static final int SPECIAL_DATE = 11; + public static final int SPECIAL_STRINGBUILDER = 12; + public static final int COMPRESSED = 2; + public static final int SERIALIZED = 8; + + public WhalinV1Transcoder() { + super(CachedData.MAX_SIZE); + } + + public CachedData encode(Object o) { + byte[] b = null; + int flags = 0; + if (o instanceof String) { + b = encodeW1String((String) o); + } else if (o instanceof StringBuffer) { + b = encodeStringBuffer((StringBuffer) o); + } else if (o instanceof StringBuilder) { + b = encodeStringbuilder((StringBuilder) o); + } else if (o instanceof Long) { + b = encodeLong((Long) o); + } else if (o instanceof Integer) { + b = encodeInteger((Integer) o); + } else if (o instanceof Short) { + b = encodeShort((Short) o); + } else if (o instanceof Boolean) { + b = encodeBoolean((Boolean) o); + } else if (o instanceof Date) { + b = encodeLong(((Date) o).getTime(), SPECIAL_DATE); + } else if (o instanceof Byte) { + b = encodeByte((Byte) o); + } else if (o instanceof Float) { + b = encodeFloat((Float) o); + } else if (o instanceof Double) { + b = encodeDouble((Double) o); + } else if (o instanceof Character) { + b = encodeCharacter((Character) o); + } else { + b = serialize(o); + flags |= SERIALIZED; + } + assert b != null; + if (b.length > compressionThreshold) { + byte[] compressed = compress(b); + if (compressed.length < 
b.length) { + getLogger().info("Compressed %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + b = compressed; + flags |= COMPRESSED; + } else { + getLogger().info( + "Compression increased the size of %s from %d to %d", + o.getClass().getName(), b.length, compressed.length); + } + } + return new CachedData(flags, b, getMaxSize()); + } + + public Object decode(CachedData d) { + byte[] data = d.getData(); + Object rv = null; + if ((d.getFlags() & COMPRESSED) != 0) { + data = decompress(d.getData()); + } + if ((d.getFlags() & SERIALIZED) != 0) { + rv = deserialize(data); + } else { + int f = data[0]; + switch (f) { + case SPECIAL_BOOLEAN: + rv = decodeBoolean(data); + break; + case SPECIAL_INTEGER: + rv = decodeInteger(data); + break; + case SPECIAL_SHORT: + rv = decodeShort(data); + break; + case SPECIAL_LONG: + rv = decodeLong(data); + break; + case SPECIAL_DATE: + rv = new Date(decodeLong(data)); + break; + case SPECIAL_BYTE: + rv = decodeByte(data); + break; + case SPECIAL_FLOAT: + rv = decodeFloat(data); + break; + case SPECIAL_DOUBLE: + rv = decodeDouble(data); + break; + case SPECIAL_STRING: + rv = decodeW1String(data); + break; + case SPECIAL_STRINGBUFFER: + rv = new StringBuffer(decodeW1String(data)); + break; + case SPECIAL_STRINGBUILDER: + rv = new StringBuilder(decodeW1String(data)); + break; + case SPECIAL_CHARACTER: + rv = decodeCharacter(data); + break; + default: + getLogger().warn("Cannot handle data with flags %x", f); + } + } + return rv; + } + + private Short decodeShort(byte[] data) { + return Short.valueOf((short) decodeInteger(data).intValue()); + } + + private Byte decodeByte(byte[] in) { + assert in.length == 2 : "Wrong length for a byte"; + byte value = in[1]; + return Byte.valueOf(value); + + } + + private Integer decodeInteger(byte[] in) { + assert in.length == 5 : "Wrong length for an int"; + return Integer.valueOf((int) decodeLong(in).longValue()); + + } + + private Float decodeFloat(byte[] in) { + assert 
in.length == 5 : "Wrong length for a float"; + Integer l = decodeInteger(in); + return Float.valueOf(Float.intBitsToFloat(l.intValue())); + } + + private Double decodeDouble(byte[] in) { + assert in.length == 9 : "Wrong length for a double"; + Long l = decodeLong(in); + return Double.valueOf(Double.longBitsToDouble(l.longValue())); + } + + private Boolean decodeBoolean(byte[] in) { + assert in.length == 2 : "Wrong length for a boolean"; + return Boolean.valueOf(in[1] == 1); + } + + private Long decodeLong(byte[] in) { + long rv = 0L; + for (int idx = 1; idx < in.length; idx++) { + byte i = in[idx]; + rv = (rv << 8) | (i < 0 ? 256 + i : i); + } + return Long.valueOf(rv); + } + + private Character decodeCharacter(byte[] b) { + return Character.valueOf((char) decodeInteger(b).intValue()); + } + + private String decodeW1String(byte[] b) { + try { + return new String(b, 1, b.length - 1, charset); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + private byte[] encodeByte(Byte value) { + byte[] b = new byte[2]; + b[0] = SPECIAL_BYTE; + b[1] = value.byteValue(); + return b; + } + + private byte[] encodeBoolean(Boolean value) { + byte[] b = new byte[2]; + b[0] = SPECIAL_BOOLEAN; + b[1] = (byte) (value.booleanValue() ? 
1 : 0); + return b; + } + + private byte[] encodeInteger(Integer value) { + byte[] b = encodeNum(value, 4); + b[0] = SPECIAL_INTEGER; + return b; + } + + private byte[] encodeLong(Long value, int type) { + byte[] b = encodeNum(value, 8); + b[0] = (byte)type; + return b; + } + + private byte[] encodeLong(Long value) { + return encodeLong(value, SPECIAL_LONG); + } + + private byte[] encodeShort(Short value) { + byte[] b = encodeInteger((int) value.shortValue()); + b[0] = SPECIAL_SHORT; + return b; + } + + private byte[] encodeFloat(Float value) { + byte[] b = encodeInteger(Float.floatToIntBits(value)); + b[0] = SPECIAL_FLOAT; + return b; + } + + private byte[] encodeDouble(Double value) { + byte[] b = encodeLong(Double.doubleToLongBits(value)); + b[0] = SPECIAL_DOUBLE; + return b; + } + + private byte[] encodeCharacter(Character value) { + byte[] result = encodeInteger((int) value.charValue()); + result[0] = SPECIAL_CHARACTER; + return result; + } + + private byte[] encodeStringBuffer(StringBuffer value) { + byte[] b = encodeW1String(value.toString()); + b[0] = SPECIAL_STRINGBUFFER; + return b; + } + + private byte[] encodeStringbuilder(StringBuilder value) { + byte[] b = encodeW1String(value.toString()); + b[0] = SPECIAL_STRINGBUILDER; + return b; + } + + private byte[] encodeW1String(String value) { + byte[] svalue = null; + try { + svalue = value.getBytes(charset); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + byte[] result = new byte[svalue.length + 1]; + System.arraycopy(svalue, 0, result, 1, svalue.length); + result[0] = SPECIAL_STRING; + return result; + } + + private byte[] encodeNum(long l, int maxBytes) { + byte[] rv = new byte[maxBytes + 1]; + + for (int i = 0; i < rv.length - 1; i++) { + int pos = rv.length - i - 1; + rv[pos] = (byte) ((l >> (8 * i)) & 0xff); + } + + return rv; + } + +} diff --git a/src/main/java/net/spy/memcached/transcoders/package.html b/src/main/java/net/spy/memcached/transcoders/package.html new 
file mode 100644 index 000000000..3459a7c24 --- /dev/null +++ b/src/main/java/net/spy/memcached/transcoders/package.html @@ -0,0 +1,14 @@ + + + + + + Transcoders convert objects to and from byte arrays and flags + + + +

Classes that deal with data encoding

+ + + diff --git a/src/main/java/net/spy/memcached/util/ArcusKetamaNodeLocatorConfiguration.java b/src/main/java/net/spy/memcached/util/ArcusKetamaNodeLocatorConfiguration.java new file mode 100644 index 000000000..f2279f7a6 --- /dev/null +++ b/src/main/java/net/spy/memcached/util/ArcusKetamaNodeLocatorConfiguration.java @@ -0,0 +1,32 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.util; + +import net.spy.memcached.MemcachedNode; + +public class ArcusKetamaNodeLocatorConfiguration extends + DefaultKetamaNodeLocatorConfiguration { + + /** + * Removes a node from the internal node-address map. + * @param node + */ + public void removeNode(MemcachedNode node) { + super.socketAddresses.remove(node); + } + +} diff --git a/src/main/java/net/spy/memcached/util/BTreeUtil.java b/src/main/java/net/spy/memcached/util/BTreeUtil.java new file mode 100644 index 000000000..8bbca5b10 --- /dev/null +++ b/src/main/java/net/spy/memcached/util/BTreeUtil.java @@ -0,0 +1,78 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/**
 * Helpers for the b+tree byte-array bkey representation: hex formatting,
 * hex parsing and unsigned lexicographic comparison.
 */
public class BTreeUtil {

    private static final String HEXES = "0123456789ABCDEF";

    /**
     * Render a byte array as an uppercase hex string prefixed with "0x".
     *
     * @param byteArray bytes to render; may be null
     * @return the "0x"-prefixed hex string, or null when the input is null
     */
    public static String toHex(byte[] byteArray) {
        if (byteArray == null) {
            return null;
        }

        final StringBuilder hex = new StringBuilder(2 * byteArray.length + 2);

        hex.append("0x");

        for (final byte b : byteArray) {
            hex.append(HEXES.charAt((b & 0xF0) >> 4));
            hex.append(HEXES.charAt(b & 0x0F));
        }

        return hex.toString();
    }

    /**
     * Parse a hex string (optionally "0x"-prefixed) into a byte array.
     *
     * @param str the hex string; may be null
     * @return the decoded bytes, or null when the input is null
     * @throws IllegalArgumentException if the string has odd length or
     *         contains a non-hex character
     */
    public static byte[] hexStringToByteArrays(String str) {
        if (str == null) {
            return null;
        }

        if (str.startsWith("0x")) {
            str = str.substring(2);
        }

        if (str.length() == 0) {
            return new byte[0];
        }

        if (str.length() % 2 != 0) {
            throw new IllegalArgumentException("Invalid hex string.");
        }

        int len = str.length();
        byte[] data = new byte[len / 2];

        for (int i = 0; i < len; i += 2) {
            int hi = Character.digit(str.charAt(i), 16);
            int lo = Character.digit(str.charAt(i + 1), 16);
            if (hi < 0 || lo < 0) {
                // Character.digit returns -1 for non-hex characters;
                // previously these were folded in silently, producing
                // garbage bytes.  Reject them explicitly instead.
                throw new IllegalArgumentException("Invalid hex string.");
            }
            data[i / 2] = (byte) ((hi << 4) + lo);
        }

        return data;
    }

    /**
     * Compare two byte arrays as sequences of unsigned bytes in
     * lexicographic order.
     *
     * @return a negative/zero/positive value as array1 sorts before/equal
     *         to/after array2
     */
    public static int compareByteArraysInLexOrder(byte[] array1, byte[] array2) {
        int diff;
        for (int i = 0; i < array1.length && i < array2.length; i++) {
            diff = (array1[i] & 0xFF) - (array2[i] & 0xFF);
            if (diff != 0) {
                return diff;
            }
        }
        return array1.length - array2.length;
    }
}
-0,0 +1,147 @@ +package net.spy.memcached.util; + +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.MemcachedClientIF; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.internal.ImmediateFuture; + +/** + * CacheLoader provides efficient mechanisms for storing lots of data. + */ +public class CacheLoader extends SpyObject { + + private final ExecutorService executorService; + private final StorageListener storageListener; + private final MemcachedClientIF client; + private final int expiration; + + /** + * Simple CacheLoader constructor that doesn't provide any feedback and + * caches forever. + * + * @param c a client + */ + public CacheLoader(MemcachedClientIF c) { + this(c, null, null, 0); + } + + /** + * Get a CacheLoader with all the options. + * + * @param c a client + * @param es an ExecutorService (e.g. thread pool) to dispatch results + * (may be null, in which case no listener may be provided) + * @param sl a storage listener (may be null) + * @param exp expiration to use while loading + */ + public CacheLoader(MemcachedClientIF c, ExecutorService es, + StorageListener sl, int exp) { + super(); + client = c; + executorService = es; + storageListener = sl; + expiration = exp; + } + + /** + * Load data from the given iterator. + * + * @param type of data being loaded + * @param i the iterator of data to load + */ + public Future loadData(Iterator> i) { + Future mostRecent = null; + while(i.hasNext()) { + Map.Entry e = i.next(); + mostRecent = push(e.getKey(), e.getValue()); + watch(e.getKey(), mostRecent); + } + + return mostRecent == null ? new ImmediateFuture(true) : mostRecent; + } + + /** + * Load data from the given map. 
+ * + * @param type of data being loaded + * @param map the map of keys to values that needs to be loaded + */ + public Future loadData(Map map) { + return loadData(map.entrySet().iterator()); + } + + /** + * Push a value into the cache. + * + * This is a wrapper around set that throttles and retries on full queues. + * + * @param the type being stored + * @param k the key + * @param value the value + * @return the future representing the stored data + */ + public Future push(String k, T value) { + Future rv = null; + while(rv == null) { + try { + rv = client.set(k, expiration, value); + } catch(IllegalStateException ex) { + // Need to slow down a bit when we start getting rejections. + try { + if(rv != null) { + rv.get(250, TimeUnit.MILLISECONDS); + } else { + Thread.sleep(250); + } + } catch(InterruptedException ie) { + Thread.currentThread().interrupt(); + } catch(Exception e2) { + // Ignore exceptions here. We're just trying to slow + // down input. + } + } + + } + return rv; + } + + private void watch(final String key, final Future f) { + if(executorService != null && storageListener != null) { + executorService.execute(new Runnable() { + public void run() { + try { + storageListener.storeResult(key, f.get()); + } catch(Exception e) { + storageListener.errorStoring(key, e); + } + } + }); + } + } + + /** + * If you are interested in the results of your data load, this interface + * will receive them. + */ + public interface StorageListener { + + /** + * Normal path response for a set. 
+ * + * @param k the key that was being set + * @param result true if the set changed the DB value + */ + void storeResult(String k, boolean result); + + /** + * @param k the key that was attempting to be stored + * @param e the error received while storing that key + */ + void errorStoring(String k, Exception e); + } +} diff --git a/src/main/java/net/spy/memcached/util/DefaultKetamaNodeLocatorConfiguration.java b/src/main/java/net/spy/memcached/util/DefaultKetamaNodeLocatorConfiguration.java new file mode 100644 index 000000000..80d0cd620 --- /dev/null +++ b/src/main/java/net/spy/memcached/util/DefaultKetamaNodeLocatorConfiguration.java @@ -0,0 +1,95 @@ +package net.spy.memcached.util; + +import java.util.HashMap; +import java.util.Map; + +import net.spy.memcached.MemcachedNode; + +/** + * A Default implementation of the configuration required for the + * KetamaNodeLocator algorithm to run. + */ +public class DefaultKetamaNodeLocatorConfiguration implements + KetamaNodeLocatorConfiguration { + + final int NUM_REPS=160; + + // Internal lookup map to try to carry forward the optimisation that was + // previously in KetamaNodeLocator + protected Map socketAddresses= + new HashMap(); + + /** + * Returns the socket address of a given MemcachedNode. + * + * @param node The node which we're interested in + * @return String the socket address of that node. + */ + protected String getSocketAddressForNode(MemcachedNode node) { + // Using the internal map retrieve the socket addresses + // for given nodes. + // I'm aware that this code is inherently thread-unsafe as + // I'm using a HashMap implementation of the map, but the worst + // case ( I believe) is we're slightly in-efficient when + // a node has never been seen before concurrently on two different + // threads, so it the socketaddress will be requested multiple times! + // all other cases should be as fast as possible. 
+ String result=socketAddresses.get(node); + if(result == null) { + result=String.valueOf(node.getSocketAddress()); + if (result.startsWith("/")) { + result = result.substring(1); + } + socketAddresses.put(node, result); + } + return result; + } + + /** + * Returns the number of discrete hashes that should be defined for each + * node in the continuum. + * + * @return NUM_REPS repetitions. + */ + public int getNodeRepetitions() { + return NUM_REPS; + } + + /** + * Returns a uniquely identifying key, suitable for hashing by the + * KetamaNodeLocator algorithm. + * + *

+ * This default implementation uses the socket-address of the MemcachedNode + * and concatenates it with a hyphen directly against the repetition number + * for example a key for a particular server's first repetition may look + * like: + *

+ * + *

+ * myhost/10.0.2.1-0 + *

+ * + *

for the second repetition

+ * + *

myhost/10.0.2.1-1

+ * + *

+ * for a server where reverse lookups are failing the returned keys + * may look like + *

+ * + *

+ * /10.0.2.1-0 and /10.0.2.1-1 + *

+ * + * @param node The MemcachedNode to use to form the unique identifier + * @param repetition The repetition number for the particular node in + * question (0 is the first repetition) + * @return The key that represents the specific repetition of the + * node + */ + public String getKeyForNode(MemcachedNode node, int repetition) { + return getSocketAddressForNode(node) + "-" + repetition; + } +} diff --git a/src/main/java/net/spy/memcached/util/KetamaNodeLocatorConfiguration.java b/src/main/java/net/spy/memcached/util/KetamaNodeLocatorConfiguration.java new file mode 100644 index 000000000..b6a6b8f60 --- /dev/null +++ b/src/main/java/net/spy/memcached/util/KetamaNodeLocatorConfiguration.java @@ -0,0 +1,30 @@ +package net.spy.memcached.util; + +import net.spy.memcached.MemcachedNode; + +/** + * Defines the set of all configuration dependencies + * required for the KetamaNodeLocator algorithm to run + */ +public interface KetamaNodeLocatorConfiguration { + + /** + * Returns a uniquely identifying key, suitable for hashing by the + * KetamaNodeLocator algorithm. + + * @param node The MemcachedNode to use to form the unique identifier + * @param repetition The repetition number for the particular node in + * question (0 is the first repetition) + * @return The key that represents the specific repetition of the node + */ + public String getKeyForNode(MemcachedNode node, int repetition); + + /** + * Returns the number of discrete hashes that should be defined for each + * node in the continuum. + * + * @return a value greater than 0 + */ + int getNodeRepetitions(); + +} diff --git a/src/main/java/net/spy/memcached/util/package.html b/src/main/java/net/spy/memcached/util/package.html new file mode 100644 index 000000000..3d5faf973 --- /dev/null +++ b/src/main/java/net/spy/memcached/util/package.html @@ -0,0 +1,18 @@ + + + + + + Cache Utilities. + + + +

Cache Utilities.

+

+ Herein you'll find various things that make your life with cache + a little better. +

+ + + diff --git a/src/test/java/net/spy/memcached/AbstractNodeLocationCase.java b/src/test/java/net/spy/memcached/AbstractNodeLocationCase.java new file mode 100644 index 000000000..1f6dc095f --- /dev/null +++ b/src/test/java/net/spy/memcached/AbstractNodeLocationCase.java @@ -0,0 +1,62 @@ +package net.spy.memcached; + +import java.util.Iterator; + +import org.jmock.Mock; +import org.jmock.MockObjectTestCase; + +public abstract class AbstractNodeLocationCase extends MockObjectTestCase { + + protected MemcachedNode[] nodes; + protected Mock[] nodeMocks; + protected NodeLocator locator; + + private void runSequenceAssertion(NodeLocator l, String k, int... seq) { + int pos=0; + for(Iterator i=l.getSequence(k); i.hasNext(); ) { + assertEquals("At position " + pos, nodes[seq[pos]].toString(), + i.next().toString()); + try { + i.remove(); + fail("Allowed a removal from a sequence."); + } catch(UnsupportedOperationException e) { + // pass + } + pos++; + } + assertEquals("Incorrect sequence size for " + k, seq.length, pos); + } + + public final void testCloningGetPrimary() { + setupNodes(5); + assertTrue(locator.getReadonlyCopy().getPrimary("hi") + instanceof MemcachedNodeROImpl); + } + + public final void testCloningGetAll() { + setupNodes(5); + assertTrue(locator.getReadonlyCopy().getAll().iterator().next() + instanceof MemcachedNodeROImpl); + } + + public final void testCloningGetSequence() { + setupNodes(5); + assertTrue(locator.getReadonlyCopy().getSequence("hi").next() + instanceof MemcachedNodeROImpl); + } + + protected final void assertSequence(String k, int... 
seq) { + runSequenceAssertion(locator, k, seq); + runSequenceAssertion(locator.getReadonlyCopy(), k, seq); + } + + protected void setupNodes(int n) { + nodes=new MemcachedNode[n]; + nodeMocks=new Mock[nodes.length]; + + for(int i=0; i addrs= + AddrUtil.getAddresses("www.google.com:80"); + assertEquals(1, addrs.size()); + assertEquals("www.google.com", addrs.get(0).getHostName()); + assertEquals(80, addrs.get(0).getPort()); + } + + public void testTwo() throws Exception { + List addrs= + AddrUtil.getAddresses("www.google.com:80 www.yahoo.com:81"); + assertEquals(2, addrs.size()); + assertEquals("www.google.com", addrs.get(0).getHostName()); + assertEquals(80, addrs.get(0).getPort()); + assertEquals("www.yahoo.com", addrs.get(1).getHostName()); + assertEquals(81, addrs.get(1).getPort()); + } + + public void testThree() throws Exception { + List addrs= + AddrUtil.getAddresses(" , www.google.com:80 ,, ,, www.yahoo.com:81 , ,,"); + assertEquals(2, addrs.size()); + assertEquals("www.google.com", addrs.get(0).getHostName()); + assertEquals(80, addrs.get(0).getPort()); + assertEquals("www.yahoo.com", addrs.get(1).getHostName()); + assertEquals(81, addrs.get(1).getPort()); + } + + public void testBrokenHost() throws Exception { + String s="www.google.com:80 www.yahoo.com:81:more"; + try { + List addrs=AddrUtil.getAddresses(s); + fail("Expected failure, got " + addrs); + } catch(NumberFormatException e) { + e.printStackTrace(); + assertEquals("For input string: \"more\"", e.getMessage()); + } + } + + public void testBrokenHost2() throws Exception { + String s="www.google.com:80 www.yahoo.com"; + try { + List addrs=AddrUtil.getAddresses(s); + fail("Expected failure, got " + addrs); + } catch(IllegalArgumentException e) { + assertEquals("Invalid server ``www.yahoo.com'' in list: " + + s, e.getMessage()); + } + } + + public void testBrokenList() throws Exception { + String s=""; + try { + List addrs=AddrUtil.getAddresses(s); + fail("Expected failure, got " + addrs); + } 
catch(IllegalArgumentException e) { + assertEquals("No hosts in list: ``''", e.getMessage()); + } + } + + public void testBrokenList2() throws Exception { + String s=" "; + try { + List addrs=AddrUtil.getAddresses(s); + fail("Expected failure, got " + addrs); + } catch(IllegalArgumentException e) { + assertEquals("No hosts in list: `` ''", e.getMessage()); + } + } + + public void testNullList() throws Exception { + String s=null; + try { + List addrs=AddrUtil.getAddresses(s); + fail("Expected failure, got " + addrs); + } catch(NullPointerException e) { + assertEquals("Null host list", e.getMessage()); + } + } + + public void testIPv6Host() throws Exception { + List addrs= + AddrUtil.getAddresses("::1:80"); + assertEquals(1, addrs.size()); + + Set validLocalhostNames=new HashSet(); + validLocalhostNames.add("localhost"); + validLocalhostNames.add("ip6-localhost"); + validLocalhostNames.add("localhost6.localdomain6"); + validLocalhostNames.add("0:0:0:0:0:0:0:1"); + assert(validLocalhostNames.contains(addrs.get(0).getHostName())); + assertEquals(80, addrs.get(0).getPort()); + } +} diff --git a/src/test/java/net/spy/memcached/ArcusKetamaNodeLocatorTest.java b/src/test/java/net/spy/memcached/ArcusKetamaNodeLocatorTest.java new file mode 100644 index 000000000..5cecf424b --- /dev/null +++ b/src/test/java/net/spy/memcached/ArcusKetamaNodeLocatorTest.java @@ -0,0 +1,3423 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +/** + * Test Arcus ketama node location. + */ +public class ArcusKetamaNodeLocatorTest extends AbstractNodeLocationCase { + + protected void setupNodes(HashAlgorithm alg, int n) { + super.setupNodes(n); + for(int i=0; i all = locator.getAll(); + assertEquals(4, all.size()); + for(int i=0; i<4; i++) { + assertTrue(all.contains(nodes[i])); + } + } + + public void testAllClone() throws Exception { + setupNodes(4); + + Collection all = locator.getReadonlyCopy().getAll(); + assertEquals(4, all.size()); + } + + public void testLookups() { + setupNodes(4); + assertSame(nodes[0], locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[0], locator.getPrimary("some other key")); + } + + public void testLookupsClone() { + setupNodes(4); + assertSame(nodes[0].toString(), + locator.getReadonlyCopy().getPrimary("dustin").toString()); + assertSame(nodes[2].toString(), + locator.getReadonlyCopy().getPrimary("noelani").toString()); + assertSame(nodes[0].toString(), + locator.getReadonlyCopy().getPrimary("some other key").toString()); + } + + public void testContinuumWrapping() { + setupNodes(4); + // This is the method by which I found something that would wrap + /* + String key="a"; + // maximum key found in the ketama continuum + long lastKey=((KetamaNodeLocator)locator).getMaxKey(); + while(HashAlgorithm.KETAMA_HASH.hash(key) < lastKey) { + key=PwGen.getPass(8); + } + System.out.println("Found a key past the end of the continuum: " + + key); + */ + assertEquals(4294887009L, ((ArcusKetamaNodeLocator)locator).getMaxKey()); + + assertSame(nodes[3], locator.getPrimary("V5XS8C8N")); + assertSame(nodes[3], locator.getPrimary("8KR2DKR2")); + assertSame(nodes[3], 
locator.getPrimary("L9KH6X4X")); + } + + public void testClusterResizing() { + setupNodes(4); + assertSame(nodes[0], locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[0], locator.getPrimary("some other key")); + + setupNodes(5); + assertSame(nodes[0], locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[4], locator.getPrimary("some other key")); + } + + public void testSequence1() { + setupNodes(4); + assertSequence("dustin", 0, 2, 1, 2); + } + + public void testSequence2() { + setupNodes(4); + assertSequence("noelani", 2, 1, 1, 3); + } + + private void assertPosForKey(String k, int nid) { + assertSame(nodes[nid], locator.getPrimary(k)); + } + + public void testLibKetamaCompat() { + setupNodes(5); + assertPosForKey("36", 2); + assertPosForKey("10037", 3); + assertPosForKey("22051", 1); + assertPosForKey("49044", 4); + } + + public void testFNV1A_32() { + HashAlgorithm alg=HashAlgorithm.FNV1A_32_HASH; + setupNodes(alg, 5); + assertSequence("noelani", 1, 2, 2, 2, 3); + + assertSame(nodes[2], locator.getPrimary("dustin")); + assertSame(nodes[1], locator.getPrimary("noelani")); + assertSame(nodes[4], locator.getPrimary("some other key")); + } + + private MemcachedNode[] mockNodes(String servers[]) { + setupNodes(servers.length); + + for(int i=0; i a=AddrUtil.getAddresses(servers[i]); + + nodeMocks[i].expects(atLeastOnce()) + .method("getSocketAddress") + .will(returnValue(a.iterator().next())); + + } + return nodes; + } + + public void testLibKetamaCompatTwo() { + String servers[] = { + "10.0.1.1:11211", + "10.0.1.2:11211", + "10.0.1.3:11211", + "10.0.1.4:11211", + "10.0.1.5:11211", + "10.0.1.6:11211", + "10.0.1.7:11211", + "10.0.1.8:11211"}; + locator=new ArcusKetamaNodeLocator(Arrays.asList(mockNodes(servers)), + HashAlgorithm.KETAMA_HASH); + + String[][] exp = { + {"0", "10.0.1.1:11211"}, + {"233", "10.0.1.7:11211"}, + {"466", "10.0.1.3:11211"}, + {"699", 
"10.0.1.1:11211"}, + {"932", "10.0.1.6:11211"}, + {"1165", "10.0.1.2:11211"}, + {"1398", "10.0.1.1:11211"}, + {"1631", "10.0.1.6:11211"}, + {"1864", "10.0.1.5:11211"}, + {"2097", "10.0.1.3:11211"}, + {"2330", "10.0.1.7:11211"}, + {"2563", "10.0.1.3:11211"}, + {"2796", "10.0.1.6:11211"}, + {"3029", "10.0.1.1:11211"}, + {"3262", "10.0.1.2:11211"}, + {"3495", "10.0.1.3:11211"}, + {"3728", "10.0.1.8:11211"}, + {"3961", "10.0.1.4:11211"}, + {"4194", "10.0.1.4:11211"}, + {"4427", "10.0.1.3:11211"}, + {"4660", "10.0.1.4:11211"}, + {"4893", "10.0.1.7:11211"}, + {"5126", "10.0.1.4:11211"}, + {"5359", "10.0.1.2:11211"}, + {"5592", "10.0.1.2:11211"}, + {"5825", "10.0.1.3:11211"}, + {"6058", "10.0.1.2:11211"}, + {"6291", "10.0.1.7:11211"}, + {"6524", "10.0.1.5:11211"}, + {"6757", "10.0.1.5:11211"}, + {"6990", "10.0.1.1:11211"}, + {"7223", "10.0.1.5:11211"}, + {"7456", "10.0.1.4:11211"}, + {"7689", "10.0.1.2:11211"}, + {"7922", "10.0.1.5:11211"}, + {"8155", "10.0.1.5:11211"}, + {"8388", "10.0.1.1:11211"}, + {"8621", "10.0.1.2:11211"}, + {"8854", "10.0.1.2:11211"}, + {"9087", "10.0.1.1:11211"}, + {"9320", "10.0.1.6:11211"}, + {"9553", "10.0.1.3:11211"}, + {"9786", "10.0.1.2:11211"}, + {"10019", "10.0.1.5:11211"}, + {"10252", "10.0.1.1:11211"}, + {"10485", "10.0.1.5:11211"}, + {"10718", "10.0.1.5:11211"}, + {"10951", "10.0.1.2:11211"}, + {"11184", "10.0.1.5:11211"}, + {"11417", "10.0.1.3:11211"}, + {"11650", "10.0.1.8:11211"}, + {"11883", "10.0.1.2:11211"}, + {"12116", "10.0.1.2:11211"}, + {"12349", "10.0.1.7:11211"}, + {"12582", "10.0.1.5:11211"}, + {"12815", "10.0.1.3:11211"}, + {"13048", "10.0.1.8:11211"}, + {"13281", "10.0.1.6:11211"}, + {"13514", "10.0.1.3:11211"}, + {"13747", "10.0.1.6:11211"}, + {"13980", "10.0.1.6:11211"}, + {"14213", "10.0.1.8:11211"}, + {"14446", "10.0.1.2:11211"}, + {"14679", "10.0.1.3:11211"}, + {"14912", "10.0.1.7:11211"}, + {"15145", "10.0.1.1:11211"}, + {"15378", "10.0.1.4:11211"}, + {"15611", "10.0.1.1:11211"}, + {"15844", "10.0.1.3:11211"}, + 
{"16077", "10.0.1.3:11211"}, + {"16310", "10.0.1.5:11211"}, + {"16543", "10.0.1.5:11211"}, + {"16776", "10.0.1.4:11211"}, + {"17009", "10.0.1.1:11211"}, + {"17242", "10.0.1.4:11211"}, + {"17475", "10.0.1.7:11211"}, + {"17708", "10.0.1.6:11211"}, + {"17941", "10.0.1.2:11211"}, + {"28892", "10.0.1.7:11211"}, + {"29125", "10.0.1.4:11211"}, + {"29358", "10.0.1.7:11211"}, + {"29591", "10.0.1.5:11211"}, + {"29824", "10.0.1.7:11211"}, + {"30057", "10.0.1.7:11211"}, + {"30290", "10.0.1.3:11211"}, + {"30523", "10.0.1.8:11211"}, + {"30756", "10.0.1.3:11211"}, + {"30989", "10.0.1.4:11211"}, + {"31222", "10.0.1.6:11211"}, + {"31455", "10.0.1.1:11211"}, + {"31688", "10.0.1.2:11211"}, + {"31921", "10.0.1.2:11211"}, + {"32154", "10.0.1.8:11211"}, + {"32387", "10.0.1.1:11211"}, + {"32620", "10.0.1.3:11211"}, + {"32853", "10.0.1.2:11211"}, + {"33086", "10.0.1.7:11211"}, + {"33319", "10.0.1.4:11211"}, + {"33552", "10.0.1.5:11211"}, + {"33785", "10.0.1.3:11211"}, + {"34018", "10.0.1.3:11211"}, + {"34251", "10.0.1.6:11211"}, + {"34484", "10.0.1.7:11211"}, + {"34717", "10.0.1.1:11211"}, + {"34950", "10.0.1.3:11211"}, + {"35183", "10.0.1.6:11211"}, + {"35416", "10.0.1.8:11211"}, + {"35649", "10.0.1.5:11211"}, + {"35882", "10.0.1.7:11211"}, + {"36115", "10.0.1.2:11211"}, + {"36348", "10.0.1.5:11211"}, + {"36581", "10.0.1.7:11211"}, + {"36814", "10.0.1.8:11211"}, + {"37047", "10.0.1.8:11211"}, + {"37280", "10.0.1.8:11211"}, + {"42173", "10.0.1.8:11211"}, + {"42406", "10.0.1.3:11211"}, + {"47998", "10.0.1.2:11211"}, + {"48231", "10.0.1.5:11211"}, + {"48464", "10.0.1.5:11211"}, + {"48697", "10.0.1.3:11211"}, + {"48930", "10.0.1.1:11211"}, + {"49163", "10.0.1.2:11211"}, + {"49396", "10.0.1.8:11211"}, + {"49629", "10.0.1.1:11211"}, + {"49862", "10.0.1.8:11211"}, + {"50095", "10.0.1.5:11211"}, + {"50328", "10.0.1.2:11211"}, + {"50561", "10.0.1.5:11211"}, + {"50794", "10.0.1.7:11211"}, + {"51027", "10.0.1.3:11211"}, + {"51260", "10.0.1.5:11211"}, + {"51493", "10.0.1.3:11211"}, + {"51726", 
"10.0.1.8:11211"}, + {"51959", "10.0.1.2:11211"}, + {"52192", "10.0.1.8:11211"}, + {"56153", "10.0.1.2:11211"}, + {"56386", "10.0.1.6:11211"}, + {"56619", "10.0.1.8:11211"}, + {"56852", "10.0.1.6:11211"}, + {"57085", "10.0.1.2:11211"}, + {"57318", "10.0.1.7:11211"}, + {"57551", "10.0.1.8:11211"}, + {"57784", "10.0.1.4:11211"}, + {"58017", "10.0.1.6:11211"}, + {"58250", "10.0.1.8:11211"}, + {"58483", "10.0.1.8:11211"}, + {"58716", "10.0.1.6:11211"}, + {"58949", "10.0.1.7:11211"}, + {"59182", "10.0.1.3:11211"}, + {"59415", "10.0.1.2:11211"}, + {"59648", "10.0.1.7:11211"}, + {"59881", "10.0.1.8:11211"}, + {"60114", "10.0.1.8:11211"}, + {"60347", "10.0.1.3:11211"}, + {"60580", "10.0.1.6:11211"}, + {"60813", "10.0.1.8:11211"}, + {"61046", "10.0.1.6:11211"}, + {"61279", "10.0.1.7:11211"}, + {"61512", "10.0.1.5:11211"}, + {"61745", "10.0.1.7:11211"}, + {"61978", "10.0.1.8:11211"}, + {"62211", "10.0.1.7:11211"}, + {"62444", "10.0.1.1:11211"}, + {"62677", "10.0.1.7:11211"}, + {"62910", "10.0.1.3:11211"}, + {"63143", "10.0.1.2:11211"}, + {"63376", "10.0.1.2:11211"}, + {"63609", "10.0.1.6:11211"}, + {"63842", "10.0.1.2:11211"}, + {"64075", "10.0.1.5:11211"}, + {"64308", "10.0.1.6:11211"}, + {"64541", "10.0.1.5:11211"}, + {"64774", "10.0.1.4:11211"}, + {"65007", "10.0.1.7:11211"}, + {"65240", "10.0.1.7:11211"}, + {"65473", "10.0.1.6:11211"}, + {"65706", "10.0.1.8:11211"}, + {"65939", "10.0.1.4:11211"}, + {"66172", "10.0.1.1:11211"}, + {"66405", "10.0.1.2:11211"}, + {"66638", "10.0.1.6:11211"}, + {"66871", "10.0.1.5:11211"}, + {"67104", "10.0.1.2:11211"}, + {"67337", "10.0.1.8:11211"}, + {"67570", "10.0.1.8:11211"}, + {"67803", "10.0.1.5:11211"}, + {"68036", "10.0.1.8:11211"}, + {"68269", "10.0.1.4:11211"}, + {"68502", "10.0.1.7:11211"}, + {"68735", "10.0.1.1:11211"}, + {"68968", "10.0.1.6:11211"}, + {"69201", "10.0.1.6:11211"}, + {"69434", "10.0.1.6:11211"}, + {"69667", "10.0.1.3:11211"}, + {"69900", "10.0.1.2:11211"}, + {"70133", "10.0.1.8:11211"}, + {"70366", 
"10.0.1.2:11211"}, + {"70599", "10.0.1.2:11211"}, + {"70832", "10.0.1.1:11211"}, + {"71065", "10.0.1.5:11211"}, + {"71298", "10.0.1.2:11211"}, + {"71531", "10.0.1.2:11211"}, + {"71764", "10.0.1.5:11211"}, + {"71997", "10.0.1.5:11211"}, + {"72230", "10.0.1.2:11211"}, + {"72463", "10.0.1.7:11211"}, + {"72696", "10.0.1.6:11211"}, + {"72929", "10.0.1.4:11211"}, + {"73162", "10.0.1.4:11211"}, + {"73395", "10.0.1.7:11211"}, + {"73628", "10.0.1.7:11211"}, + {"73861", "10.0.1.1:11211"}, + {"74094", "10.0.1.6:11211"}, + {"74327", "10.0.1.1:11211"}, + {"74560", "10.0.1.6:11211"}, + {"74793", "10.0.1.1:11211"}, + {"75026", "10.0.1.5:11211"}, + {"75259", "10.0.1.5:11211"}, + {"75492", "10.0.1.8:11211"}, + {"75725", "10.0.1.7:11211"}, + {"75958", "10.0.1.4:11211"}, + {"76191", "10.0.1.5:11211"}, + {"76424", "10.0.1.5:11211"}, + {"76657", "10.0.1.2:11211"}, + {"76890", "10.0.1.7:11211"}, + {"77123", "10.0.1.4:11211"}, + {"77356", "10.0.1.2:11211"}, + {"77589", "10.0.1.6:11211"}, + {"77822", "10.0.1.1:11211"}, + {"78055", "10.0.1.6:11211"}, + {"78288", "10.0.1.7:11211"}, + {"78521", "10.0.1.7:11211"}, + {"78754", "10.0.1.5:11211"}, + {"78987", "10.0.1.6:11211"}, + {"79220", "10.0.1.4:11211"}, + {"79453", "10.0.1.6:11211"}, + {"79686", "10.0.1.4:11211"}, + {"79919", "10.0.1.3:11211"}, + {"80152", "10.0.1.2:11211"}, + {"80385", "10.0.1.6:11211"}, + {"80618", "10.0.1.5:11211"}, + {"80851", "10.0.1.7:11211"}, + {"81084", "10.0.1.8:11211"}, + {"81317", "10.0.1.5:11211"}, + {"81550", "10.0.1.8:11211"}, + {"81783", "10.0.1.4:11211"}, + {"82016", "10.0.1.8:11211"}, + {"82249", "10.0.1.5:11211"}, + {"82482", "10.0.1.5:11211"}, + {"82715", "10.0.1.5:11211"}, + {"82948", "10.0.1.5:11211"}, + {"83181", "10.0.1.1:11211"}, + {"83414", "10.0.1.1:11211"}, + {"83647", "10.0.1.2:11211"}, + {"83880", "10.0.1.2:11211"}, + {"84113", "10.0.1.6:11211"}, + {"84346", "10.0.1.6:11211"}, + {"84579", "10.0.1.5:11211"}, + {"84812", "10.0.1.8:11211"}, + {"85045", "10.0.1.6:11211"}, + {"85278", 
"10.0.1.7:11211"}, + {"85511", "10.0.1.2:11211"}, + {"85744", "10.0.1.1:11211"}, + {"85977", "10.0.1.6:11211"}, + {"86210", "10.0.1.7:11211"}, + {"86443", "10.0.1.4:11211"}, + {"86676", "10.0.1.3:11211"}, + {"86909", "10.0.1.1:11211"}, + {"87142", "10.0.1.8:11211"}, + {"87375", "10.0.1.1:11211"}, + {"87608", "10.0.1.7:11211"}, + {"87841", "10.0.1.1:11211"}, + {"88074", "10.0.1.1:11211"}, + {"88307", "10.0.1.7:11211"}, + {"88540", "10.0.1.4:11211"}, + {"88773", "10.0.1.5:11211"}, + {"89006", "10.0.1.2:11211"}, + {"89239", "10.0.1.1:11211"}, + {"89472", "10.0.1.6:11211"}, + {"89705", "10.0.1.2:11211"}, + {"89938", "10.0.1.2:11211"}, + {"90171", "10.0.1.7:11211"}, + {"90404", "10.0.1.5:11211"}, + {"90637", "10.0.1.8:11211"}, + {"90870", "10.0.1.8:11211"}, + {"91103", "10.0.1.7:11211"}, + {"91336", "10.0.1.5:11211"}, + {"91569", "10.0.1.8:11211"}, + {"91802", "10.0.1.2:11211"}, + {"92035", "10.0.1.8:11211"}, + {"92268", "10.0.1.4:11211"}, + {"92501", "10.0.1.6:11211"}, + {"92734", "10.0.1.2:11211"}, + {"92967", "10.0.1.6:11211"}, + {"93200", "10.0.1.1:11211"}, + {"93433", "10.0.1.2:11211"}, + {"93666", "10.0.1.6:11211"}, + {"93899", "10.0.1.2:11211"}, + {"94132", "10.0.1.2:11211"}, + {"103685", "10.0.1.2:11211"}, + {"103918", "10.0.1.7:11211"}, + {"104151", "10.0.1.5:11211"}, + {"104384", "10.0.1.1:11211"}, + {"104617", "10.0.1.3:11211"}, + {"104850", "10.0.1.3:11211"}, + {"105083", "10.0.1.7:11211"}, + {"105316", "10.0.1.2:11211"}, + {"105549", "10.0.1.3:11211"}, + {"105782", "10.0.1.2:11211"}, + {"106015", "10.0.1.4:11211"}, + {"106248", "10.0.1.3:11211"}, + {"106481", "10.0.1.4:11211"}, + {"106714", "10.0.1.8:11211"}, + {"106947", "10.0.1.8:11211"}, + {"107180", "10.0.1.6:11211"}, + {"107413", "10.0.1.7:11211"}, + {"107646", "10.0.1.5:11211"}, + {"107879", "10.0.1.8:11211"}, + {"108112", "10.0.1.1:11211"}, + {"108345", "10.0.1.8:11211"}, + {"108578", "10.0.1.5:11211"}, + {"112539", "10.0.1.8:11211"}, + {"112772", "10.0.1.1:11211"}, + {"113005", "10.0.1.5:11211"}, + 
{"113238", "10.0.1.4:11211"}, + {"116034", "10.0.1.8:11211"}, + {"116267", "10.0.1.1:11211"}, + {"116500", "10.0.1.6:11211"}, + {"116733", "10.0.1.1:11211"}, + {"116966", "10.0.1.6:11211"}, + {"117199", "10.0.1.4:11211"}, + {"117432", "10.0.1.1:11211"}, + {"117665", "10.0.1.2:11211"}, + {"117898", "10.0.1.6:11211"}, + {"118131", "10.0.1.3:11211"}, + {"118364", "10.0.1.2:11211"}, + {"118597", "10.0.1.5:11211"}, + {"118830", "10.0.1.5:11211"}, + {"119063", "10.0.1.3:11211"}, + {"119296", "10.0.1.6:11211"}, + {"119529", "10.0.1.1:11211"}, + {"119762", "10.0.1.6:11211"}, + {"119995", "10.0.1.7:11211"}, + {"120228", "10.0.1.2:11211"}, + {"120461", "10.0.1.2:11211"}, + {"124888", "10.0.1.3:11211"}, + {"125121", "10.0.1.6:11211"}, + {"125354", "10.0.1.5:11211"}, + {"125587", "10.0.1.2:11211"}, + {"125820", "10.0.1.3:11211"}, + {"126053", "10.0.1.5:11211"}, + {"126286", "10.0.1.5:11211"}, + {"126519", "10.0.1.2:11211"}, + {"126752", "10.0.1.6:11211"}, + {"126985", "10.0.1.7:11211"}, + {"127218", "10.0.1.6:11211"}, + {"127451", "10.0.1.7:11211"}, + {"127684", "10.0.1.6:11211"}, + {"127917", "10.0.1.7:11211"}, + {"128150", "10.0.1.6:11211"}, + {"128383", "10.0.1.1:11211"}, + {"128616", "10.0.1.4:11211"}, + {"128849", "10.0.1.3:11211"}, + {"129082", "10.0.1.5:11211"}, + {"129315", "10.0.1.8:11211"}, + {"129548", "10.0.1.6:11211"}, + {"129781", "10.0.1.6:11211"}, + {"130014", "10.0.1.6:11211"}, + {"130247", "10.0.1.5:11211"}, + {"130480", "10.0.1.6:11211"}, + {"130713", "10.0.1.2:11211"}, + {"130946", "10.0.1.5:11211"}, + {"131179", "10.0.1.5:11211"}, + {"131412", "10.0.1.7:11211"}, + {"131645", "10.0.1.2:11211"}, + {"131878", "10.0.1.6:11211"}, + {"132111", "10.0.1.5:11211"}, + {"132344", "10.0.1.8:11211"}, + {"132577", "10.0.1.1:11211"}, + {"132810", "10.0.1.1:11211"}, + {"133043", "10.0.1.7:11211"}, + {"133276", "10.0.1.4:11211"}, + {"133509", "10.0.1.8:11211"}, + {"133742", "10.0.1.3:11211"}, + {"133975", "10.0.1.5:11211"}, + {"134208", "10.0.1.1:11211"}, + {"134441", 
"10.0.1.8:11211"}, + {"134674", "10.0.1.7:11211"}, + {"134907", "10.0.1.4:11211"}, + {"135140", "10.0.1.3:11211"}, + {"135373", "10.0.1.5:11211"}, + {"135606", "10.0.1.7:11211"}, + {"135839", "10.0.1.8:11211"}, + {"136072", "10.0.1.8:11211"}, + {"136305", "10.0.1.7:11211"}, + {"136538", "10.0.1.1:11211"}, + {"136771", "10.0.1.1:11211"}, + {"137004", "10.0.1.2:11211"}, + {"137237", "10.0.1.2:11211"}, + {"137470", "10.0.1.2:11211"}, + {"137703", "10.0.1.3:11211"}, + {"137936", "10.0.1.1:11211"}, + {"138169", "10.0.1.6:11211"}, + {"138402", "10.0.1.8:11211"}, + {"138635", "10.0.1.7:11211"}, + {"138868", "10.0.1.2:11211"}, + {"139101", "10.0.1.4:11211"}, + {"139334", "10.0.1.7:11211"}, + {"139567", "10.0.1.6:11211"}, + {"139800", "10.0.1.1:11211"}, + {"140033", "10.0.1.8:11211"}, + {"140266", "10.0.1.7:11211"}, + {"140499", "10.0.1.3:11211"}, + {"140732", "10.0.1.2:11211"}, + {"140965", "10.0.1.7:11211"}, + {"141198", "10.0.1.6:11211"}, + {"141431", "10.0.1.7:11211"}, + {"141664", "10.0.1.5:11211"}, + {"141897", "10.0.1.5:11211"}, + {"142130", "10.0.1.5:11211"}, + {"142363", "10.0.1.4:11211"}, + {"142596", "10.0.1.8:11211"}, + {"142829", "10.0.1.2:11211"}, + {"143062", "10.0.1.2:11211"}, + {"143295", "10.0.1.4:11211"}, + {"143528", "10.0.1.8:11211"}, + {"143761", "10.0.1.2:11211"}, + {"143994", "10.0.1.5:11211"}, + {"144227", "10.0.1.3:11211"}, + {"144460", "10.0.1.2:11211"}, + {"152149", "10.0.1.2:11211"}, + {"152382", "10.0.1.2:11211"}, + {"152615", "10.0.1.3:11211"}, + {"152848", "10.0.1.5:11211"}, + {"153081", "10.0.1.1:11211"}, + {"153314", "10.0.1.1:11211"}, + {"153547", "10.0.1.1:11211"}, + {"153780", "10.0.1.1:11211"}, + {"154013", "10.0.1.2:11211"}, + {"154246", "10.0.1.1:11211"}, + {"154479", "10.0.1.8:11211"}, + {"154712", "10.0.1.1:11211"}, + {"154945", "10.0.1.1:11211"}, + {"155178", "10.0.1.8:11211"}, + {"155411", "10.0.1.8:11211"}, + {"155644", "10.0.1.8:11211"}, + {"155877", "10.0.1.2:11211"}, + {"156110", "10.0.1.3:11211"}, + {"156343", 
"10.0.1.1:11211"}, + {"156576", "10.0.1.7:11211"}, + {"156809", "10.0.1.7:11211"}, + {"157042", "10.0.1.1:11211"}, + {"157275", "10.0.1.7:11211"}, + {"157508", "10.0.1.2:11211"}, + {"157741", "10.0.1.7:11211"}, + {"157974", "10.0.1.5:11211"}, + {"158207", "10.0.1.5:11211"}, + {"158440", "10.0.1.4:11211"}, + {"158673", "10.0.1.3:11211"}, + {"158906", "10.0.1.3:11211"}, + {"159139", "10.0.1.8:11211"}, + {"159372", "10.0.1.6:11211"}, + {"159605", "10.0.1.3:11211"}, + {"159838", "10.0.1.4:11211"}, + {"160071", "10.0.1.2:11211"}, + {"160304", "10.0.1.4:11211"}, + {"160537", "10.0.1.6:11211"}, + {"160770", "10.0.1.5:11211"}, + {"161003", "10.0.1.3:11211"}, + {"161236", "10.0.1.7:11211"}, + {"161469", "10.0.1.5:11211"}, + {"161702", "10.0.1.7:11211"}, + {"161935", "10.0.1.8:11211"}, + {"162168", "10.0.1.8:11211"}, + {"162401", "10.0.1.8:11211"}, + {"162634", "10.0.1.8:11211"}, + {"162867", "10.0.1.6:11211"}, + {"163100", "10.0.1.8:11211"}, + {"163333", "10.0.1.7:11211"}, + {"163566", "10.0.1.7:11211"}, + {"163799", "10.0.1.3:11211"}, + {"164032", "10.0.1.6:11211"}, + {"164265", "10.0.1.8:11211"}, + {"169158", "10.0.1.7:11211"}, + {"169391", "10.0.1.6:11211"}, + {"169624", "10.0.1.6:11211"}, + {"169857", "10.0.1.6:11211"}, + {"170090", "10.0.1.8:11211"}, + {"170323", "10.0.1.4:11211"}, + {"170556", "10.0.1.2:11211"}, + {"170789", "10.0.1.8:11211"}, + {"171022", "10.0.1.8:11211"}, + {"171255", "10.0.1.4:11211"}, + {"171488", "10.0.1.7:11211"}, + {"171721", "10.0.1.4:11211"}, + {"171954", "10.0.1.5:11211"}, + {"172187", "10.0.1.4:11211"}, + {"172420", "10.0.1.3:11211"}, + {"172653", "10.0.1.2:11211"}, + {"172886", "10.0.1.1:11211"}, + {"173119", "10.0.1.8:11211"}, + {"173352", "10.0.1.5:11211"}, + {"176614", "10.0.1.3:11211"}, + {"176847", "10.0.1.8:11211"}, + {"177080", "10.0.1.1:11211"}, + {"177313", "10.0.1.4:11211"}, + {"177546", "10.0.1.2:11211"}, + {"177779", "10.0.1.8:11211"}, + {"178012", "10.0.1.6:11211"}, + {"178245", "10.0.1.3:11211"}, + {"178478", 
"10.0.1.7:11211"}, + {"178711", "10.0.1.3:11211"}, + {"178944", "10.0.1.8:11211"}, + {"179177", "10.0.1.1:11211"}, + {"179410", "10.0.1.6:11211"}, + {"179643", "10.0.1.1:11211"}, + {"179876", "10.0.1.8:11211"}, + {"180109", "10.0.1.3:11211"}, + {"180342", "10.0.1.2:11211"}, + {"180575", "10.0.1.7:11211"}, + {"180808", "10.0.1.8:11211"}, + {"181041", "10.0.1.5:11211"}, + {"181274", "10.0.1.6:11211"}, + {"181507", "10.0.1.8:11211"}, + {"181740", "10.0.1.6:11211"}, + {"181973", "10.0.1.5:11211"}, + {"182206", "10.0.1.4:11211"}, + {"182439", "10.0.1.2:11211"}, + {"182672", "10.0.1.8:11211"}, + {"182905", "10.0.1.8:11211"}, + {"183138", "10.0.1.4:11211"}, + {"183371", "10.0.1.8:11211"}, + {"183604", "10.0.1.7:11211"}, + {"183837", "10.0.1.7:11211"}, + {"184070", "10.0.1.4:11211"}, + {"184303", "10.0.1.7:11211"}, + {"184536", "10.0.1.1:11211"}, + {"184769", "10.0.1.3:11211"}, + {"185002", "10.0.1.1:11211"}, + {"185235", "10.0.1.6:11211"}, + {"185468", "10.0.1.7:11211"}, + {"185701", "10.0.1.4:11211"}, + {"185934", "10.0.1.1:11211"}, + {"186167", "10.0.1.8:11211"}, + {"186400", "10.0.1.8:11211"}, + {"186633", "10.0.1.1:11211"}, + {"186866", "10.0.1.5:11211"}, + {"187099", "10.0.1.8:11211"}, + {"187332", "10.0.1.1:11211"}, + {"187565", "10.0.1.5:11211"}, + {"187798", "10.0.1.1:11211"}, + {"188031", "10.0.1.8:11211"}, + {"188264", "10.0.1.5:11211"}, + {"188497", "10.0.1.2:11211"}, + {"188730", "10.0.1.6:11211"}, + {"196419", "10.0.1.8:11211"}, + {"196652", "10.0.1.7:11211"}, + {"196885", "10.0.1.1:11211"}, + {"197118", "10.0.1.6:11211"}, + {"197351", "10.0.1.1:11211"}, + {"197584", "10.0.1.1:11211"}, + {"197817", "10.0.1.8:11211"}, + {"198050", "10.0.1.6:11211"}, + {"198283", "10.0.1.3:11211"}, + {"198516", "10.0.1.8:11211"}, + {"198749", "10.0.1.6:11211"}, + {"198982", "10.0.1.2:11211"}, + {"199215", "10.0.1.4:11211"}, + {"199448", "10.0.1.5:11211"}, + {"199681", "10.0.1.6:11211"}, + {"199914", "10.0.1.6:11211"}, + {"200147", "10.0.1.3:11211"}, + {"200380", 
"10.0.1.4:11211"}, + {"200613", "10.0.1.1:11211"}, + {"200846", "10.0.1.6:11211"}, + {"201079", "10.0.1.7:11211"}, + {"201312", "10.0.1.7:11211"}, + {"201545", "10.0.1.1:11211"}, + {"201778", "10.0.1.1:11211"}, + {"202011", "10.0.1.7:11211"}, + {"202244", "10.0.1.7:11211"}, + {"202477", "10.0.1.6:11211"}, + {"202710", "10.0.1.1:11211"}, + {"202943", "10.0.1.1:11211"}, + {"203176", "10.0.1.1:11211"}, + {"203409", "10.0.1.3:11211"}, + {"203642", "10.0.1.5:11211"}, + {"203875", "10.0.1.1:11211"}, + {"204108", "10.0.1.8:11211"}, + {"204341", "10.0.1.1:11211"}, + {"204574", "10.0.1.4:11211"}, + {"204807", "10.0.1.3:11211"}, + {"205040", "10.0.1.7:11211"}, + {"205273", "10.0.1.2:11211"}, + {"205506", "10.0.1.6:11211"}, + {"205739", "10.0.1.2:11211"}, + {"205972", "10.0.1.6:11211"}, + {"206205", "10.0.1.6:11211"}, + {"206438", "10.0.1.6:11211"}, + {"206671", "10.0.1.7:11211"}, + {"206904", "10.0.1.6:11211"}, + {"207137", "10.0.1.7:11211"}, + {"207370", "10.0.1.5:11211"}, + {"207603", "10.0.1.1:11211"}, + {"207836", "10.0.1.5:11211"}, + {"208069", "10.0.1.6:11211"}, + {"208302", "10.0.1.8:11211"}, + {"208535", "10.0.1.5:11211"}, + {"208768", "10.0.1.3:11211"}, + {"209001", "10.0.1.2:11211"}, + {"209234", "10.0.1.3:11211"}, + {"209467", "10.0.1.4:11211"}, + {"209700", "10.0.1.6:11211"}, + {"209933", "10.0.1.5:11211"}, + {"210166", "10.0.1.1:11211"}, + {"210399", "10.0.1.8:11211"}, + {"210632", "10.0.1.4:11211"}, + {"210865", "10.0.1.8:11211"}, + {"211098", "10.0.1.8:11211"}, + {"211331", "10.0.1.3:11211"}, + {"211564", "10.0.1.6:11211"}, + {"211797", "10.0.1.8:11211"}, + {"212030", "10.0.1.8:11211"}, + {"212263", "10.0.1.7:11211"}, + {"212496", "10.0.1.8:11211"}, + {"212729", "10.0.1.1:11211"}, + {"212962", "10.0.1.7:11211"}, + {"213195", "10.0.1.6:11211"}, + {"213428", "10.0.1.2:11211"}, + {"213661", "10.0.1.8:11211"}, + {"213894", "10.0.1.5:11211"}, + {"214127", "10.0.1.3:11211"}, + {"214360", "10.0.1.1:11211"}, + {"214593", "10.0.1.2:11211"}, + {"214826", 
"10.0.1.7:11211"}, + {"215059", "10.0.1.7:11211"}, + {"215292", "10.0.1.2:11211"}, + {"215525", "10.0.1.1:11211"}, + {"215758", "10.0.1.8:11211"}, + {"215991", "10.0.1.3:11211"}, + {"216224", "10.0.1.8:11211"}, + {"216457", "10.0.1.3:11211"}, + {"216690", "10.0.1.7:11211"}, + {"216923", "10.0.1.2:11211"}, + {"217156", "10.0.1.2:11211"}, + {"217389", "10.0.1.2:11211"}, + {"217622", "10.0.1.4:11211"}, + {"217855", "10.0.1.3:11211"}, + {"218088", "10.0.1.7:11211"}, + {"218321", "10.0.1.8:11211"}, + {"218554", "10.0.1.7:11211"}, + {"218787", "10.0.1.2:11211"}, + {"219020", "10.0.1.8:11211"}, + {"219253", "10.0.1.8:11211"}, + {"219486", "10.0.1.5:11211"}, + {"219719", "10.0.1.2:11211"}, + {"219952", "10.0.1.3:11211"}, + {"220185", "10.0.1.7:11211"}, + {"220418", "10.0.1.7:11211"}, + {"220651", "10.0.1.7:11211"}, + {"220884", "10.0.1.5:11211"}, + {"221117", "10.0.1.5:11211"}, + {"221350", "10.0.1.8:11211"}, + {"221583", "10.0.1.7:11211"}, + {"221816", "10.0.1.7:11211"}, + {"222049", "10.0.1.8:11211"}, + {"222282", "10.0.1.2:11211"}, + {"222515", "10.0.1.8:11211"}, + {"222748", "10.0.1.8:11211"}, + {"222981", "10.0.1.6:11211"}, + {"223214", "10.0.1.2:11211"}, + {"223447", "10.0.1.4:11211"}, + {"223680", "10.0.1.2:11211"}, + {"223913", "10.0.1.8:11211"}, + {"224146", "10.0.1.8:11211"}, + {"224379", "10.0.1.3:11211"}, + {"224612", "10.0.1.5:11211"}, + {"224845", "10.0.1.8:11211"}, + {"225078", "10.0.1.8:11211"}, + {"225311", "10.0.1.3:11211"}, + {"225544", "10.0.1.5:11211"}, + {"225777", "10.0.1.1:11211"}, + {"226010", "10.0.1.6:11211"}, + {"226243", "10.0.1.7:11211"}, + {"226476", "10.0.1.2:11211"}, + {"226709", "10.0.1.3:11211"}, + {"226942", "10.0.1.5:11211"}, + {"227175", "10.0.1.3:11211"}, + {"227408", "10.0.1.1:11211"}, + {"227641", "10.0.1.8:11211"}, + {"227874", "10.0.1.4:11211"}, + {"228107", "10.0.1.6:11211"}, + {"228340", "10.0.1.4:11211"}, + {"228573", "10.0.1.1:11211"}, + {"228806", "10.0.1.3:11211"}, + {"238825", "10.0.1.2:11211"}, + {"239058", 
"10.0.1.5:11211"}, + {"239291", "10.0.1.5:11211"}, + {"239524", "10.0.1.6:11211"}, + {"239757", "10.0.1.5:11211"}, + {"239990", "10.0.1.7:11211"}, + {"240223", "10.0.1.7:11211"}, + {"240456", "10.0.1.5:11211"}, + {"240689", "10.0.1.5:11211"}, + {"240922", "10.0.1.1:11211"}, + {"241155", "10.0.1.1:11211"}, + {"241388", "10.0.1.7:11211"}, + {"241621", "10.0.1.1:11211"}, + {"241854", "10.0.1.8:11211"}, + {"242087", "10.0.1.6:11211"}, + {"242320", "10.0.1.8:11211"}, + {"242553", "10.0.1.2:11211"}, + {"242786", "10.0.1.5:11211"}, + {"243019", "10.0.1.6:11211"}, + {"243252", "10.0.1.4:11211"}, + {"243485", "10.0.1.8:11211"}, + {"243718", "10.0.1.7:11211"}, + {"243951", "10.0.1.5:11211"}, + {"249310", "10.0.1.5:11211"}, + {"249543", "10.0.1.8:11211"}, + {"249776", "10.0.1.7:11211"}, + {"250009", "10.0.1.3:11211"}, + {"250242", "10.0.1.8:11211"}, + {"250475", "10.0.1.4:11211"}, + {"250708", "10.0.1.8:11211"}, + {"250941", "10.0.1.1:11211"}, + {"251174", "10.0.1.5:11211"}, + {"251407", "10.0.1.8:11211"}, + {"251640", "10.0.1.1:11211"}, + {"251873", "10.0.1.2:11211"}, + {"252106", "10.0.1.5:11211"}, + {"252339", "10.0.1.2:11211"}, + {"252572", "10.0.1.5:11211"}, + {"252805", "10.0.1.8:11211"}, + {"253038", "10.0.1.5:11211"}, + {"253271", "10.0.1.7:11211"}, + {"253504", "10.0.1.4:11211"}, + {"253737", "10.0.1.3:11211"}, + {"253970", "10.0.1.7:11211"}, + {"254203", "10.0.1.6:11211"}, + {"254436", "10.0.1.2:11211"}, + {"254669", "10.0.1.5:11211"}, + {"254902", "10.0.1.8:11211"}, + {"255135", "10.0.1.5:11211"}, + {"255368", "10.0.1.3:11211"}, + {"255601", "10.0.1.4:11211"}, + {"255834", "10.0.1.1:11211"}, + {"256067", "10.0.1.8:11211"}, + {"260960", "10.0.1.5:11211"}, + {"261193", "10.0.1.5:11211"}, + {"261426", "10.0.1.2:11211"}, + {"261659", "10.0.1.2:11211"}, + {"261892", "10.0.1.2:11211"}, + {"262125", "10.0.1.8:11211"}, + {"262358", "10.0.1.6:11211"}, + {"262591", "10.0.1.8:11211"}, + {"262824", "10.0.1.1:11211"}, + {"263057", "10.0.1.5:11211"}, + {"263290", 
"10.0.1.1:11211"}, + {"263523", "10.0.1.3:11211"}, + {"263756", "10.0.1.2:11211"}, + {"263989", "10.0.1.5:11211"}, + {"264222", "10.0.1.5:11211"}, + {"264455", "10.0.1.5:11211"}, + {"264688", "10.0.1.5:11211"}, + {"264921", "10.0.1.6:11211"}, + {"265154", "10.0.1.7:11211"}, + {"265387", "10.0.1.7:11211"}, + {"265620", "10.0.1.4:11211"}, + {"265853", "10.0.1.5:11211"}, + {"266086", "10.0.1.6:11211"}, + {"266319", "10.0.1.1:11211"}, + {"266552", "10.0.1.2:11211"}, + {"266785", "10.0.1.7:11211"}, + {"267018", "10.0.1.1:11211"}, + {"267251", "10.0.1.6:11211"}, + {"267484", "10.0.1.8:11211"}, + {"267717", "10.0.1.3:11211"}, + {"267950", "10.0.1.2:11211"}, + {"268183", "10.0.1.5:11211"}, + {"268416", "10.0.1.6:11211"}, + {"268649", "10.0.1.5:11211"}, + {"268882", "10.0.1.6:11211"}, + {"269115", "10.0.1.5:11211"}, + {"269348", "10.0.1.2:11211"}, + {"269581", "10.0.1.4:11211"}, + {"269814", "10.0.1.6:11211"}, + {"270047", "10.0.1.2:11211"}, + {"270280", "10.0.1.1:11211"}, + {"270513", "10.0.1.8:11211"}, + {"270746", "10.0.1.6:11211"}, + {"270979", "10.0.1.4:11211"}, + {"271212", "10.0.1.6:11211"}, + {"271445", "10.0.1.8:11211"}, + {"271678", "10.0.1.7:11211"}, + {"271911", "10.0.1.3:11211"}, + {"272144", "10.0.1.8:11211"}, + {"272377", "10.0.1.7:11211"}, + {"272610", "10.0.1.1:11211"}, + {"272843", "10.0.1.8:11211"}, + {"273076", "10.0.1.8:11211"}, + {"273309", "10.0.1.1:11211"}, + {"273542", "10.0.1.8:11211"}, + {"273775", "10.0.1.5:11211"}, + {"274008", "10.0.1.7:11211"}, + {"274241", "10.0.1.7:11211"}, + {"274474", "10.0.1.6:11211"}, + {"274707", "10.0.1.6:11211"}, + {"274940", "10.0.1.8:11211"}, + {"275173", "10.0.1.4:11211"}, + {"275406", "10.0.1.6:11211"}, + {"275639", "10.0.1.2:11211"}, + {"275872", "10.0.1.3:11211"}, + {"276105", "10.0.1.5:11211"}, + {"276338", "10.0.1.2:11211"}, + {"276571", "10.0.1.4:11211"}, + {"276804", "10.0.1.4:11211"}, + {"277037", "10.0.1.6:11211"}, + {"277270", "10.0.1.6:11211"}, + {"277503", "10.0.1.1:11211"}, + {"277736", 
"10.0.1.6:11211"}, + {"277969", "10.0.1.7:11211"}, + {"278202", "10.0.1.3:11211"}, + {"278435", "10.0.1.6:11211"}, + {"278668", "10.0.1.3:11211"}, + {"278901", "10.0.1.1:11211"}, + {"279134", "10.0.1.7:11211"}, + {"279367", "10.0.1.4:11211"}, + {"279600", "10.0.1.6:11211"}, + {"279833", "10.0.1.8:11211"}, + {"280066", "10.0.1.7:11211"}, + {"280299", "10.0.1.8:11211"}, + {"280532", "10.0.1.5:11211"}, + {"280765", "10.0.1.8:11211"}, + {"280998", "10.0.1.1:11211"}, + {"281231", "10.0.1.5:11211"}, + {"281464", "10.0.1.8:11211"}, + {"281697", "10.0.1.2:11211"}, + {"281930", "10.0.1.7:11211"}, + {"282163", "10.0.1.5:11211"}, + {"282396", "10.0.1.4:11211"}, + {"282629", "10.0.1.7:11211"}, + {"282862", "10.0.1.7:11211"}, + {"283095", "10.0.1.5:11211"}, + {"283328", "10.0.1.2:11211"}, + {"283561", "10.0.1.1:11211"}, + {"283794", "10.0.1.8:11211"}, + {"284027", "10.0.1.2:11211"}, + {"284260", "10.0.1.6:11211"}, + {"284493", "10.0.1.2:11211"}, + {"284726", "10.0.1.2:11211"}, + {"284959", "10.0.1.6:11211"}, + {"285192", "10.0.1.3:11211"}, + {"285425", "10.0.1.6:11211"}, + {"285658", "10.0.1.2:11211"}, + {"285891", "10.0.1.8:11211"}, + {"286124", "10.0.1.3:11211"}, + {"286357", "10.0.1.4:11211"}, + {"286590", "10.0.1.5:11211"}, + {"286823", "10.0.1.7:11211"}, + {"287056", "10.0.1.6:11211"}, + {"287289", "10.0.1.3:11211"}, + {"287522", "10.0.1.3:11211"}, + {"287755", "10.0.1.3:11211"}, + {"287988", "10.0.1.6:11211"}, + {"288221", "10.0.1.8:11211"}, + {"288454", "10.0.1.4:11211"}, + {"288687", "10.0.1.3:11211"}, + {"288920", "10.0.1.5:11211"}, + {"289153", "10.0.1.8:11211"}, + {"289386", "10.0.1.7:11211"}, + {"289619", "10.0.1.8:11211"}, + {"289852", "10.0.1.8:11211"}, + {"290085", "10.0.1.3:11211"}, + {"290318", "10.0.1.7:11211"}, + {"290551", "10.0.1.8:11211"}, + {"290784", "10.0.1.7:11211"}, + {"291017", "10.0.1.5:11211"}, + {"291250", "10.0.1.8:11211"}, + {"291483", "10.0.1.3:11211"}, + {"291716", "10.0.1.3:11211"}, + {"291949", "10.0.1.1:11211"}, + {"292182", 
"10.0.1.8:11211"}, + {"292415", "10.0.1.5:11211"}, + {"292648", "10.0.1.8:11211"}, + {"292881", "10.0.1.5:11211"}, + {"293114", "10.0.1.7:11211"}, + {"293347", "10.0.1.5:11211"}, + {"293580", "10.0.1.5:11211"}, + {"293813", "10.0.1.5:11211"}, + {"294046", "10.0.1.3:11211"}, + {"294279", "10.0.1.6:11211"}, + {"294512", "10.0.1.8:11211"}, + {"294745", "10.0.1.7:11211"}, + {"294978", "10.0.1.7:11211"}, + {"295211", "10.0.1.2:11211"}, + {"295444", "10.0.1.5:11211"}, + {"295677", "10.0.1.7:11211"}, + {"295910", "10.0.1.3:11211"}, + {"296143", "10.0.1.7:11211"}, + {"296376", "10.0.1.4:11211"}, + {"296609", "10.0.1.3:11211"}, + {"296842", "10.0.1.6:11211"}, + {"297075", "10.0.1.2:11211"}, + {"297308", "10.0.1.4:11211"}, + {"297541", "10.0.1.8:11211"}, + {"297774", "10.0.1.3:11211"}, + {"298007", "10.0.1.3:11211"}, + {"298240", "10.0.1.5:11211"}, + {"298473", "10.0.1.4:11211"}, + {"298706", "10.0.1.5:11211"}, + {"298939", "10.0.1.7:11211"}, + {"303599", "10.0.1.6:11211"}, + {"303832", "10.0.1.4:11211"}, + {"304065", "10.0.1.3:11211"}, + {"304298", "10.0.1.1:11211"}, + {"304531", "10.0.1.1:11211"}, + {"304764", "10.0.1.8:11211"}, + {"304997", "10.0.1.7:11211"}, + {"305230", "10.0.1.6:11211"}, + {"305463", "10.0.1.2:11211"}, + {"305696", "10.0.1.7:11211"}, + {"305929", "10.0.1.4:11211"}, + {"306162", "10.0.1.4:11211"}, + {"306395", "10.0.1.4:11211"}, + {"306628", "10.0.1.8:11211"}, + {"306861", "10.0.1.7:11211"}, + {"307094", "10.0.1.4:11211"}, + {"307327", "10.0.1.8:11211"}, + {"307560", "10.0.1.2:11211"}, + {"307793", "10.0.1.8:11211"}, + {"308026", "10.0.1.5:11211"}, + {"308259", "10.0.1.6:11211"}, + {"308492", "10.0.1.2:11211"}, + {"308725", "10.0.1.6:11211"}, + {"308958", "10.0.1.3:11211"}, + {"309191", "10.0.1.7:11211"}, + {"309424", "10.0.1.7:11211"}, + {"309657", "10.0.1.8:11211"}, + {"309890", "10.0.1.8:11211"}, + {"310123", "10.0.1.1:11211"}, + {"310356", "10.0.1.3:11211"}, + {"310589", "10.0.1.5:11211"}, + {"310822", "10.0.1.8:11211"}, + {"311055", 
"10.0.1.3:11211"}, + {"311288", "10.0.1.8:11211"}, + {"311521", "10.0.1.8:11211"}, + {"311754", "10.0.1.3:11211"}, + {"311987", "10.0.1.7:11211"}, + {"312220", "10.0.1.5:11211"}, + {"312453", "10.0.1.2:11211"}, + {"312686", "10.0.1.2:11211"}, + {"312919", "10.0.1.1:11211"}, + {"313152", "10.0.1.8:11211"}, + {"313385", "10.0.1.2:11211"}, + {"313618", "10.0.1.8:11211"}, + {"313851", "10.0.1.4:11211"}, + {"314084", "10.0.1.4:11211"}, + {"314317", "10.0.1.6:11211"}, + {"314550", "10.0.1.5:11211"}, + {"314783", "10.0.1.1:11211"}, + {"315016", "10.0.1.1:11211"}, + {"315249", "10.0.1.6:11211"}, + {"315482", "10.0.1.4:11211"}, + {"315715", "10.0.1.2:11211"}, + {"315948", "10.0.1.7:11211"}, + {"316181", "10.0.1.2:11211"}, + {"316414", "10.0.1.8:11211"}, + {"316647", "10.0.1.7:11211"}, + {"316880", "10.0.1.7:11211"}, + {"317113", "10.0.1.8:11211"}, + {"317346", "10.0.1.7:11211"}, + {"317579", "10.0.1.4:11211"}, + {"317812", "10.0.1.2:11211"}, + {"318045", "10.0.1.1:11211"}, + {"318278", "10.0.1.6:11211"}, + {"318511", "10.0.1.6:11211"}, + {"318744", "10.0.1.1:11211"}, + {"318977", "10.0.1.5:11211"}, + {"319210", "10.0.1.2:11211"}, + {"319443", "10.0.1.4:11211"}, + {"319676", "10.0.1.7:11211"}, + {"319909", "10.0.1.3:11211"}, + {"320142", "10.0.1.8:11211"}, + {"320375", "10.0.1.1:11211"}, + {"320608", "10.0.1.8:11211"}, + {"320841", "10.0.1.1:11211"}, + {"321074", "10.0.1.5:11211"}, + {"321307", "10.0.1.2:11211"}, + {"321540", "10.0.1.3:11211"}, + {"321773", "10.0.1.6:11211"}, + {"322006", "10.0.1.3:11211"}, + {"322239", "10.0.1.7:11211"}, + {"322472", "10.0.1.6:11211"}, + {"322705", "10.0.1.8:11211"}, + {"322938", "10.0.1.4:11211"}, + {"323171", "10.0.1.7:11211"}, + {"323404", "10.0.1.2:11211"}, + {"323637", "10.0.1.4:11211"}, + {"323870", "10.0.1.2:11211"}, + {"324103", "10.0.1.7:11211"}, + {"324336", "10.0.1.3:11211"}, + {"324569", "10.0.1.2:11211"}, + {"324802", "10.0.1.3:11211"}, + {"325035", "10.0.1.8:11211"}, + {"325268", "10.0.1.5:11211"}, + {"325501", 
"10.0.1.6:11211"}, + {"325734", "10.0.1.3:11211"}, + {"325967", "10.0.1.5:11211"}, + {"326200", "10.0.1.1:11211"}, + {"326433", "10.0.1.8:11211"}, + {"326666", "10.0.1.4:11211"}, + {"326899", "10.0.1.2:11211"}, + {"327132", "10.0.1.4:11211"}, + {"327365", "10.0.1.1:11211"}, + {"327598", "10.0.1.3:11211"}, + {"327831", "10.0.1.6:11211"}, + {"328064", "10.0.1.4:11211"}, + {"328297", "10.0.1.6:11211"}, + {"328530", "10.0.1.8:11211"}, + {"328763", "10.0.1.2:11211"}, + {"328996", "10.0.1.1:11211"}, + {"329229", "10.0.1.3:11211"}, + {"329462", "10.0.1.7:11211"}, + {"329695", "10.0.1.6:11211"}, + {"329928", "10.0.1.1:11211"}, + {"330161", "10.0.1.7:11211"}, + {"330394", "10.0.1.1:11211"}, + {"330627", "10.0.1.3:11211"}, + {"330860", "10.0.1.8:11211"}, + {"331093", "10.0.1.2:11211"}, + {"331326", "10.0.1.6:11211"}, + {"331559", "10.0.1.4:11211"}, + {"331792", "10.0.1.7:11211"}, + {"332025", "10.0.1.8:11211"}, + {"332258", "10.0.1.2:11211"}, + {"332491", "10.0.1.5:11211"}, + {"332724", "10.0.1.6:11211"}, + {"332957", "10.0.1.4:11211"}, + {"333190", "10.0.1.8:11211"}, + {"333423", "10.0.1.7:11211"}, + {"333656", "10.0.1.7:11211"}, + {"333889", "10.0.1.1:11211"}, + {"334122", "10.0.1.5:11211"}, + {"334355", "10.0.1.4:11211"}, + {"334588", "10.0.1.8:11211"}, + {"334821", "10.0.1.3:11211"}, + {"335054", "10.0.1.3:11211"}, + {"335287", "10.0.1.7:11211"}, + {"335520", "10.0.1.8:11211"}, + {"335753", "10.0.1.2:11211"}, + {"335986", "10.0.1.3:11211"}, + {"336219", "10.0.1.2:11211"}, + {"336452", "10.0.1.8:11211"}, + {"336685", "10.0.1.2:11211"}, + {"336918", "10.0.1.7:11211"}, + {"337151", "10.0.1.4:11211"}, + {"337384", "10.0.1.6:11211"}, + {"337617", "10.0.1.1:11211"}, + {"337850", "10.0.1.2:11211"}, + {"338083", "10.0.1.7:11211"}, + {"338316", "10.0.1.8:11211"}, + {"338549", "10.0.1.5:11211"}, + {"338782", "10.0.1.8:11211"}, + {"339015", "10.0.1.8:11211"}, + {"339248", "10.0.1.5:11211"}, + {"339481", "10.0.1.8:11211"}, + {"339714", "10.0.1.3:11211"}, + {"339947", 
"10.0.1.7:11211"}, + {"340180", "10.0.1.5:11211"}, + {"340413", "10.0.1.5:11211"}, + {"340646", "10.0.1.8:11211"}, + {"340879", "10.0.1.3:11211"}, + {"341112", "10.0.1.6:11211"}, + {"341345", "10.0.1.8:11211"}, + {"341578", "10.0.1.7:11211"}, + {"341811", "10.0.1.2:11211"}, + {"342044", "10.0.1.1:11211"}, + {"342277", "10.0.1.4:11211"}, + {"342510", "10.0.1.5:11211"}, + {"342743", "10.0.1.5:11211"}, + {"342976", "10.0.1.4:11211"}, + {"343209", "10.0.1.7:11211"}, + {"343442", "10.0.1.8:11211"}, + {"343675", "10.0.1.1:11211"}, + {"343908", "10.0.1.2:11211"}, + {"348801", "10.0.1.2:11211"}, + {"349034", "10.0.1.7:11211"}, + {"349267", "10.0.1.5:11211"}, + {"349500", "10.0.1.7:11211"}, + {"349733", "10.0.1.8:11211"}, + {"349966", "10.0.1.3:11211"}, + {"350199", "10.0.1.8:11211"}, + {"350432", "10.0.1.4:11211"}, + {"350665", "10.0.1.5:11211"}, + {"350898", "10.0.1.2:11211"}, + {"351131", "10.0.1.5:11211"}, + {"351364", "10.0.1.8:11211"}, + {"351597", "10.0.1.3:11211"}, + {"351830", "10.0.1.5:11211"}, + {"352063", "10.0.1.3:11211"}, + {"352296", "10.0.1.1:11211"}, + {"352529", "10.0.1.4:11211"}, + {"352762", "10.0.1.5:11211"}, + {"352995", "10.0.1.1:11211"}, + {"353228", "10.0.1.8:11211"}, + {"357888", "10.0.1.5:11211"}, + {"358121", "10.0.1.8:11211"}, + {"358354", "10.0.1.1:11211"}, + {"358587", "10.0.1.1:11211"}, + {"358820", "10.0.1.7:11211"}, + {"359053", "10.0.1.4:11211"}, + {"359286", "10.0.1.2:11211"}, + {"359519", "10.0.1.3:11211"}, + {"359752", "10.0.1.1:11211"}, + {"359985", "10.0.1.1:11211"}, + {"360218", "10.0.1.4:11211"}, + {"360451", "10.0.1.5:11211"}, + {"360684", "10.0.1.2:11211"}, + {"360917", "10.0.1.4:11211"}, + {"361150", "10.0.1.1:11211"}, + {"361383", "10.0.1.2:11211"}, + {"361616", "10.0.1.3:11211"}, + {"361849", "10.0.1.3:11211"}, + {"362082", "10.0.1.2:11211"}, + {"362315", "10.0.1.4:11211"}, + {"362548", "10.0.1.5:11211"}, + {"362781", "10.0.1.6:11211"}, + {"363014", "10.0.1.7:11211"}, + {"363247", "10.0.1.8:11211"}, + {"363480", 
"10.0.1.7:11211"}, + {"363713", "10.0.1.4:11211"}, + {"363946", "10.0.1.4:11211"}, + {"364179", "10.0.1.2:11211"}, + {"364412", "10.0.1.5:11211"}, + {"364645", "10.0.1.8:11211"}, + {"364878", "10.0.1.1:11211"}, + {"365111", "10.0.1.6:11211"}, + {"365344", "10.0.1.5:11211"}, + {"365577", "10.0.1.6:11211"}, + {"365810", "10.0.1.8:11211"}, + {"366043", "10.0.1.5:11211"}, + {"366276", "10.0.1.4:11211"}, + {"366509", "10.0.1.6:11211"}, + {"366742", "10.0.1.7:11211"}, + {"374431", "10.0.1.4:11211"}, + {"374664", "10.0.1.8:11211"}, + {"374897", "10.0.1.6:11211"}, + {"375130", "10.0.1.3:11211"}, + {"375363", "10.0.1.4:11211"}, + {"375596", "10.0.1.8:11211"}, + {"375829", "10.0.1.1:11211"}, + {"376062", "10.0.1.3:11211"}, + {"376295", "10.0.1.1:11211"}, + {"376528", "10.0.1.1:11211"}, + {"376761", "10.0.1.4:11211"}, + {"376994", "10.0.1.7:11211"}, + {"377227", "10.0.1.2:11211"}, + {"377460", "10.0.1.8:11211"}, + {"377693", "10.0.1.1:11211"}, + {"377926", "10.0.1.5:11211"}, + {"378159", "10.0.1.1:11211"}, + {"378392", "10.0.1.8:11211"}, + {"378625", "10.0.1.7:11211"}, + {"378858", "10.0.1.4:11211"}, + {"379091", "10.0.1.3:11211"}, + {"379324", "10.0.1.8:11211"}, + {"379557", "10.0.1.2:11211"}, + {"379790", "10.0.1.2:11211"}, + {"380023", "10.0.1.8:11211"}, + {"380256", "10.0.1.6:11211"}, + {"380489", "10.0.1.2:11211"}, + {"380722", "10.0.1.8:11211"}, + {"380955", "10.0.1.5:11211"}, + {"381188", "10.0.1.8:11211"}, + {"381421", "10.0.1.8:11211"}, + {"381654", "10.0.1.4:11211"}, + {"381887", "10.0.1.7:11211"}, + {"382120", "10.0.1.7:11211"}, + {"382353", "10.0.1.3:11211"}, + {"382586", "10.0.1.6:11211"}, + {"382819", "10.0.1.2:11211"}, + {"383052", "10.0.1.8:11211"}, + {"383285", "10.0.1.7:11211"}, + {"383518", "10.0.1.8:11211"}, + {"383751", "10.0.1.2:11211"}, + {"383984", "10.0.1.6:11211"}, + {"384217", "10.0.1.6:11211"}, + {"384450", "10.0.1.3:11211"}, + {"384683", "10.0.1.8:11211"}, + {"384916", "10.0.1.3:11211"}, + {"385149", "10.0.1.1:11211"}, + {"385382", 
"10.0.1.2:11211"}, + {"385615", "10.0.1.1:11211"}, + {"385848", "10.0.1.5:11211"}, + {"386081", "10.0.1.5:11211"}, + {"386314", "10.0.1.6:11211"}, + {"386547", "10.0.1.3:11211"}, + {"386780", "10.0.1.4:11211"}, + {"387013", "10.0.1.7:11211"}, + {"387246", "10.0.1.8:11211"}, + {"387479", "10.0.1.3:11211"}, + {"387712", "10.0.1.8:11211"}, + {"387945", "10.0.1.8:11211"}, + {"388178", "10.0.1.6:11211"}, + {"388411", "10.0.1.1:11211"}, + {"388644", "10.0.1.8:11211"}, + {"388877", "10.0.1.7:11211"}, + {"389110", "10.0.1.1:11211"}, + {"389343", "10.0.1.1:11211"}, + {"389576", "10.0.1.8:11211"}, + {"389809", "10.0.1.2:11211"}, + {"390042", "10.0.1.5:11211"}, + {"390275", "10.0.1.7:11211"}, + {"390508", "10.0.1.2:11211"}, + {"390741", "10.0.1.2:11211"}, + {"390974", "10.0.1.4:11211"}, + {"391207", "10.0.1.3:11211"}, + {"391440", "10.0.1.4:11211"}, + {"391673", "10.0.1.4:11211"}, + {"391906", "10.0.1.7:11211"}, + {"392139", "10.0.1.3:11211"}, + {"392372", "10.0.1.6:11211"}, + {"392605", "10.0.1.6:11211"}, + {"392838", "10.0.1.6:11211"}, + {"393071", "10.0.1.2:11211"}, + {"393304", "10.0.1.2:11211"}, + {"393537", "10.0.1.2:11211"}, + {"393770", "10.0.1.4:11211"}, + {"394003", "10.0.1.1:11211"}, + {"394236", "10.0.1.6:11211"}, + {"394469", "10.0.1.4:11211"}, + {"394702", "10.0.1.6:11211"}, + {"394935", "10.0.1.7:11211"}, + {"395168", "10.0.1.4:11211"}, + {"395401", "10.0.1.3:11211"}, + {"395634", "10.0.1.4:11211"}, + {"395867", "10.0.1.1:11211"}, + {"396100", "10.0.1.6:11211"}, + {"396333", "10.0.1.4:11211"}, + {"396566", "10.0.1.8:11211"}, + {"396799", "10.0.1.3:11211"}, + {"397032", "10.0.1.4:11211"}, + {"397265", "10.0.1.3:11211"}, + {"397498", "10.0.1.5:11211"}, + {"397731", "10.0.1.1:11211"}, + {"397964", "10.0.1.2:11211"}, + {"398197", "10.0.1.8:11211"}, + {"398430", "10.0.1.7:11211"}, + {"398663", "10.0.1.1:11211"}, + {"398896", "10.0.1.5:11211"}, + {"399129", "10.0.1.5:11211"}, + {"399362", "10.0.1.8:11211"}, + {"399595", "10.0.1.8:11211"}, + {"399828", 
"10.0.1.7:11211"}, + {"400061", "10.0.1.6:11211"}, + {"400294", "10.0.1.8:11211"}, + {"400527", "10.0.1.2:11211"}, + {"400760", "10.0.1.7:11211"}, + {"400993", "10.0.1.7:11211"}, + {"401226", "10.0.1.1:11211"}, + {"401459", "10.0.1.1:11211"}, + {"401692", "10.0.1.2:11211"}, + {"401925", "10.0.1.1:11211"}, + {"402158", "10.0.1.1:11211"}, + {"402391", "10.0.1.5:11211"}, + {"402624", "10.0.1.1:11211"}, + {"402857", "10.0.1.8:11211"}, + {"403090", "10.0.1.1:11211"}, + {"403323", "10.0.1.5:11211"}, + {"403556", "10.0.1.6:11211"}, + {"403789", "10.0.1.6:11211"}, + {"404022", "10.0.1.2:11211"}, + {"404255", "10.0.1.4:11211"}, + {"404488", "10.0.1.1:11211"}, + {"404721", "10.0.1.8:11211"}, + {"404954", "10.0.1.1:11211"}, + {"405187", "10.0.1.7:11211"}, + {"405420", "10.0.1.7:11211"}, + {"405653", "10.0.1.7:11211"}, + {"405886", "10.0.1.1:11211"}, + {"406119", "10.0.1.3:11211"}, + {"406352", "10.0.1.5:11211"}, + {"411944", "10.0.1.1:11211"}, + {"412177", "10.0.1.8:11211"}, + {"412410", "10.0.1.3:11211"}, + {"412643", "10.0.1.7:11211"}, + {"412876", "10.0.1.4:11211"}, + {"413109", "10.0.1.8:11211"}, + {"413342", "10.0.1.7:11211"}, + {"413575", "10.0.1.8:11211"}, + {"413808", "10.0.1.7:11211"}, + {"414041", "10.0.1.1:11211"}, + {"414274", "10.0.1.5:11211"}, + {"414507", "10.0.1.1:11211"}, + {"414740", "10.0.1.8:11211"}, + {"414973", "10.0.1.2:11211"}, + {"415206", "10.0.1.8:11211"}, + {"415439", "10.0.1.6:11211"}, + {"415672", "10.0.1.1:11211"}, + {"415905", "10.0.1.7:11211"}, + {"416138", "10.0.1.6:11211"}, + {"416371", "10.0.1.8:11211"}, + {"416604", "10.0.1.4:11211"}, + {"416837", "10.0.1.4:11211"}, + {"417070", "10.0.1.4:11211"}, + {"417303", "10.0.1.6:11211"}, + {"417536", "10.0.1.6:11211"}, + {"417769", "10.0.1.8:11211"}, + {"418002", "10.0.1.3:11211"}, + {"418235", "10.0.1.8:11211"}, + {"418468", "10.0.1.4:11211"}, + {"418701", "10.0.1.4:11211"}, + {"418934", "10.0.1.7:11211"}, + {"419167", "10.0.1.6:11211"}, + {"419400", "10.0.1.3:11211"}, + {"419633", 
"10.0.1.5:11211"}, + {"419866", "10.0.1.8:11211"}, + {"420099", "10.0.1.6:11211"}, + {"420332", "10.0.1.3:11211"}, + {"420565", "10.0.1.5:11211"}, + {"420798", "10.0.1.8:11211"}, + {"421031", "10.0.1.8:11211"}, + {"421264", "10.0.1.6:11211"}, + {"426856", "10.0.1.3:11211"}, + {"427089", "10.0.1.5:11211"}, + {"427322", "10.0.1.1:11211"}, + {"427555", "10.0.1.6:11211"}, + {"427788", "10.0.1.5:11211"}, + {"428021", "10.0.1.8:11211"}, + {"428254", "10.0.1.3:11211"}, + {"428487", "10.0.1.7:11211"}, + {"428720", "10.0.1.4:11211"}, + {"428953", "10.0.1.4:11211"}, + {"429186", "10.0.1.8:11211"}, + {"429419", "10.0.1.6:11211"}, + {"429652", "10.0.1.8:11211"}, + {"429885", "10.0.1.6:11211"}, + {"430118", "10.0.1.5:11211"}, + {"430351", "10.0.1.4:11211"}, + {"430584", "10.0.1.6:11211"}, + {"430817", "10.0.1.5:11211"}, + {"431050", "10.0.1.3:11211"}, + {"431283", "10.0.1.8:11211"}, + {"431516", "10.0.1.1:11211"}, + {"431749", "10.0.1.4:11211"}, + {"431982", "10.0.1.5:11211"}, + {"432215", "10.0.1.6:11211"}, + {"432448", "10.0.1.6:11211"}, + {"432681", "10.0.1.4:11211"}, + {"432914", "10.0.1.2:11211"}, + {"433147", "10.0.1.7:11211"}, + {"433380", "10.0.1.6:11211"}, + {"433613", "10.0.1.1:11211"}, + {"433846", "10.0.1.3:11211"}, + {"434079", "10.0.1.1:11211"}, + {"434312", "10.0.1.2:11211"}, + {"434545", "10.0.1.4:11211"}, + {"434778", "10.0.1.3:11211"}, + {"435011", "10.0.1.5:11211"}, + {"435244", "10.0.1.2:11211"}, + {"435477", "10.0.1.4:11211"}, + {"435710", "10.0.1.5:11211"}, + {"435943", "10.0.1.6:11211"}, + {"436176", "10.0.1.8:11211"}, + {"436409", "10.0.1.5:11211"}, + {"436642", "10.0.1.1:11211"}, + {"436875", "10.0.1.8:11211"}, + {"437108", "10.0.1.6:11211"}, + {"437341", "10.0.1.2:11211"}, + {"437574", "10.0.1.1:11211"}, + {"437807", "10.0.1.8:11211"}, + {"438040", "10.0.1.7:11211"}, + {"438273", "10.0.1.3:11211"}, + {"438506", "10.0.1.4:11211"}, + {"438739", "10.0.1.8:11211"}, + {"438972", "10.0.1.7:11211"}, + {"439205", "10.0.1.8:11211"}, + {"439438", 
"10.0.1.1:11211"}, + {"439671", "10.0.1.3:11211"}, + {"439904", "10.0.1.3:11211"}, + {"440137", "10.0.1.4:11211"}, + {"440370", "10.0.1.3:11211"}, + {"457612", "10.0.1.8:11211"}, + {"457845", "10.0.1.3:11211"}, + {"458078", "10.0.1.1:11211"}, + {"458311", "10.0.1.3:11211"}, + {"458544", "10.0.1.7:11211"}, + {"458777", "10.0.1.7:11211"}, + {"459010", "10.0.1.5:11211"}, + {"459243", "10.0.1.6:11211"}, + {"459476", "10.0.1.6:11211"}, + {"459709", "10.0.1.6:11211"}, + {"459942", "10.0.1.1:11211"}, + {"460175", "10.0.1.1:11211"}, + {"460408", "10.0.1.7:11211"}, + {"460641", "10.0.1.5:11211"}, + {"460874", "10.0.1.5:11211"}, + {"461107", "10.0.1.3:11211"}, + {"461340", "10.0.1.4:11211"}, + {"461573", "10.0.1.7:11211"}, + {"461806", "10.0.1.7:11211"}, + {"462039", "10.0.1.4:11211"}, + {"462272", "10.0.1.1:11211"}, + {"462505", "10.0.1.8:11211"}, + {"462738", "10.0.1.5:11211"}, + {"462971", "10.0.1.3:11211"}, + {"463204", "10.0.1.8:11211"}, + {"463437", "10.0.1.8:11211"}, + {"463670", "10.0.1.5:11211"}, + {"463903", "10.0.1.1:11211"}, + {"464136", "10.0.1.5:11211"}, + {"464369", "10.0.1.6:11211"}, + {"464602", "10.0.1.7:11211"}, + {"464835", "10.0.1.2:11211"}, + {"465068", "10.0.1.2:11211"}, + {"465301", "10.0.1.1:11211"}, + {"465534", "10.0.1.8:11211"}, + {"465767", "10.0.1.6:11211"}, + {"466000", "10.0.1.1:11211"}, + {"466233", "10.0.1.6:11211"}, + {"466466", "10.0.1.3:11211"}, + {"466699", "10.0.1.6:11211"}, + {"466932", "10.0.1.4:11211"}, + {"467165", "10.0.1.4:11211"}, + {"467398", "10.0.1.2:11211"}, + {"467631", "10.0.1.7:11211"}, + {"467864", "10.0.1.3:11211"}, + {"468097", "10.0.1.6:11211"}, + {"468330", "10.0.1.1:11211"}, + {"468563", "10.0.1.1:11211"}, + {"468796", "10.0.1.7:11211"}, + {"469029", "10.0.1.6:11211"}, + {"469262", "10.0.1.3:11211"}, + {"469495", "10.0.1.6:11211"}, + {"469728", "10.0.1.6:11211"}, + {"469961", "10.0.1.4:11211"}, + {"470194", "10.0.1.5:11211"}, + {"470427", "10.0.1.6:11211"}, + {"470660", "10.0.1.1:11211"}, + {"470893", 
"10.0.1.4:11211"}, + {"471126", "10.0.1.7:11211"}, + {"471359", "10.0.1.7:11211"}, + {"471592", "10.0.1.1:11211"}, + {"471825", "10.0.1.5:11211"}, + {"472058", "10.0.1.6:11211"}, + {"472291", "10.0.1.7:11211"}, + {"472524", "10.0.1.8:11211"}, + {"472757", "10.0.1.2:11211"}, + {"472990", "10.0.1.2:11211"}, + {"473223", "10.0.1.7:11211"}, + {"473456", "10.0.1.7:11211"}, + {"473689", "10.0.1.5:11211"}, + {"473922", "10.0.1.7:11211"}, + {"474155", "10.0.1.6:11211"}, + {"474388", "10.0.1.8:11211"}, + {"474621", "10.0.1.3:11211"}, + {"474854", "10.0.1.1:11211"}, + {"475087", "10.0.1.1:11211"}, + {"475320", "10.0.1.4:11211"}, + {"475553", "10.0.1.6:11211"}, + {"475786", "10.0.1.4:11211"}, + {"476019", "10.0.1.5:11211"}, + {"476252", "10.0.1.1:11211"}, + {"476485", "10.0.1.4:11211"}, + {"476718", "10.0.1.6:11211"}, + {"476951", "10.0.1.8:11211"}, + {"477184", "10.0.1.8:11211"}, + {"477417", "10.0.1.3:11211"}, + {"477650", "10.0.1.1:11211"}, + {"477883", "10.0.1.1:11211"}, + {"478116", "10.0.1.2:11211"}, + {"478349", "10.0.1.5:11211"}, + {"478582", "10.0.1.4:11211"}, + {"478815", "10.0.1.4:11211"}, + {"479048", "10.0.1.1:11211"}, + {"479281", "10.0.1.1:11211"}, + {"479514", "10.0.1.2:11211"}, + {"479747", "10.0.1.5:11211"}, + {"479980", "10.0.1.7:11211"}, + {"480213", "10.0.1.3:11211"}, + {"480446", "10.0.1.1:11211"}, + {"480679", "10.0.1.4:11211"}, + {"480912", "10.0.1.6:11211"}, + {"481145", "10.0.1.2:11211"}, + {"481378", "10.0.1.1:11211"}, + {"481611", "10.0.1.7:11211"}, + {"481844", "10.0.1.3:11211"}, + {"482077", "10.0.1.4:11211"}, + {"482310", "10.0.1.4:11211"}, + {"482543", "10.0.1.3:11211"}, + {"482776", "10.0.1.8:11211"}, + {"483009", "10.0.1.7:11211"}, + {"483242", "10.0.1.6:11211"}, + {"483475", "10.0.1.3:11211"}, + {"483708", "10.0.1.5:11211"}, + {"483941", "10.0.1.8:11211"}, + {"484174", "10.0.1.7:11211"}, + {"484407", "10.0.1.2:11211"}, + {"484640", "10.0.1.8:11211"}, + {"484873", "10.0.1.8:11211"}, + {"485106", "10.0.1.4:11211"}, + {"485339", 
"10.0.1.2:11211"}, + {"485572", "10.0.1.6:11211"}, + {"485805", "10.0.1.5:11211"}, + {"486038", "10.0.1.3:11211"}, + {"486271", "10.0.1.5:11211"}, + {"486504", "10.0.1.1:11211"}, + {"486737", "10.0.1.5:11211"}, + {"486970", "10.0.1.3:11211"}, + {"487203", "10.0.1.2:11211"}, + {"487436", "10.0.1.8:11211"}, + {"487669", "10.0.1.3:11211"}, + {"487902", "10.0.1.7:11211"}, + {"488135", "10.0.1.2:11211"}, + {"488368", "10.0.1.7:11211"}, + {"488601", "10.0.1.8:11211"}, + {"488834", "10.0.1.8:11211"}, + {"489067", "10.0.1.5:11211"}, + {"489300", "10.0.1.4:11211"}, + {"489533", "10.0.1.5:11211"}, + {"489766", "10.0.1.5:11211"}, + {"489999", "10.0.1.3:11211"}, + {"490232", "10.0.1.4:11211"}, + {"490465", "10.0.1.2:11211"}, + {"490698", "10.0.1.1:11211"}, + {"490931", "10.0.1.2:11211"}, + {"491164", "10.0.1.5:11211"}, + {"491397", "10.0.1.5:11211"}, + {"491630", "10.0.1.2:11211"}, + {"491863", "10.0.1.1:11211"}, + {"492096", "10.0.1.1:11211"}, + {"492329", "10.0.1.5:11211"}, + {"492562", "10.0.1.7:11211"}, + {"492795", "10.0.1.3:11211"}, + {"493028", "10.0.1.1:11211"}, + {"493261", "10.0.1.2:11211"}, + {"493494", "10.0.1.3:11211"}, + {"493727", "10.0.1.6:11211"}, + {"493960", "10.0.1.5:11211"}, + {"494193", "10.0.1.6:11211"}, + {"494426", "10.0.1.6:11211"}, + {"494659", "10.0.1.4:11211"}, + {"494892", "10.0.1.4:11211"}, + {"495125", "10.0.1.1:11211"}, + {"495358", "10.0.1.3:11211"}, + {"495591", "10.0.1.6:11211"}, + {"495824", "10.0.1.5:11211"}, + {"496057", "10.0.1.7:11211"}, + {"496290", "10.0.1.5:11211"}, + {"496523", "10.0.1.5:11211"}, + {"496756", "10.0.1.1:11211"}, + {"496989", "10.0.1.3:11211"}, + {"497222", "10.0.1.8:11211"}, + {"497455", "10.0.1.6:11211"}, + {"497688", "10.0.1.7:11211"}, + {"497921", "10.0.1.5:11211"}, + {"498154", "10.0.1.4:11211"}, + {"498387", "10.0.1.3:11211"}, + {"498620", "10.0.1.3:11211"}, + {"498853", "10.0.1.3:11211"}, + {"499086", "10.0.1.1:11211"}, + {"499319", "10.0.1.8:11211"}, + {"499552", "10.0.1.3:11211"}, + {"499785", 
"10.0.1.2:11211"}, + {"500018", "10.0.1.5:11211"}, + {"500251", "10.0.1.8:11211"}, + {"500484", "10.0.1.7:11211"}, + {"500717", "10.0.1.4:11211"}, + {"500950", "10.0.1.5:11211"}, + {"501183", "10.0.1.4:11211"}, + {"501416", "10.0.1.4:11211"}, + {"501649", "10.0.1.2:11211"}, + {"501882", "10.0.1.1:11211"}, + {"502115", "10.0.1.3:11211"}, + {"502348", "10.0.1.3:11211"}, + {"502581", "10.0.1.8:11211"}, + {"502814", "10.0.1.1:11211"}, + {"503047", "10.0.1.8:11211"}, + {"503280", "10.0.1.7:11211"}, + {"503513", "10.0.1.1:11211"}, + {"503746", "10.0.1.1:11211"}, + {"503979", "10.0.1.8:11211"}, + {"504212", "10.0.1.3:11211"}, + {"504445", "10.0.1.2:11211"}, + {"504678", "10.0.1.2:11211"}, + {"504911", "10.0.1.3:11211"}, + {"505144", "10.0.1.5:11211"}, + {"505377", "10.0.1.6:11211"}, + {"505610", "10.0.1.1:11211"}, + {"505843", "10.0.1.6:11211"}, + {"506076", "10.0.1.1:11211"}, + {"506309", "10.0.1.2:11211"}, + {"506542", "10.0.1.3:11211"}, + {"506775", "10.0.1.8:11211"}, + {"507008", "10.0.1.2:11211"}, + {"507241", "10.0.1.3:11211"}, + {"507474", "10.0.1.6:11211"}, + {"507707", "10.0.1.7:11211"}, + {"507940", "10.0.1.4:11211"}, + {"508173", "10.0.1.5:11211"}, + {"508406", "10.0.1.1:11211"}, + {"508639", "10.0.1.3:11211"}, + {"508872", "10.0.1.4:11211"}, + {"509105", "10.0.1.7:11211"}, + {"509338", "10.0.1.2:11211"}, + {"509571", "10.0.1.3:11211"}, + {"509804", "10.0.1.3:11211"}, + {"510037", "10.0.1.7:11211"}, + {"510270", "10.0.1.4:11211"}, + {"510503", "10.0.1.1:11211"}, + {"510736", "10.0.1.3:11211"}, + {"510969", "10.0.1.6:11211"}, + {"511202", "10.0.1.2:11211"}, + {"511435", "10.0.1.2:11211"}, + {"511668", "10.0.1.4:11211"}, + {"511901", "10.0.1.6:11211"}, + {"512134", "10.0.1.2:11211"}, + {"512367", "10.0.1.1:11211"}, + {"512600", "10.0.1.1:11211"}, + {"512833", "10.0.1.3:11211"}, + {"513066", "10.0.1.3:11211"}, + {"513299", "10.0.1.3:11211"}, + {"513532", "10.0.1.2:11211"}, + {"513765", "10.0.1.8:11211"}, + {"513998", "10.0.1.7:11211"}, + {"514231", 
"10.0.1.6:11211"}, + {"514464", "10.0.1.3:11211"}, + {"514697", "10.0.1.3:11211"}, + {"514930", "10.0.1.6:11211"}, + {"515163", "10.0.1.7:11211"}, + {"515396", "10.0.1.8:11211"}, + {"515629", "10.0.1.5:11211"}, + {"515862", "10.0.1.7:11211"}, + {"516095", "10.0.1.2:11211"}, + {"516328", "10.0.1.8:11211"}, + {"516561", "10.0.1.3:11211"}, + {"516794", "10.0.1.5:11211"}, + {"517027", "10.0.1.6:11211"}, + {"517260", "10.0.1.3:11211"}, + {"517493", "10.0.1.5:11211"}, + {"517726", "10.0.1.8:11211"}, + {"517959", "10.0.1.7:11211"}, + {"518192", "10.0.1.8:11211"}, + {"518425", "10.0.1.8:11211"}, + {"518658", "10.0.1.8:11211"}, + {"518891", "10.0.1.7:11211"}, + {"519124", "10.0.1.3:11211"}, + {"519357", "10.0.1.6:11211"}, + {"519590", "10.0.1.5:11211"}, + {"519823", "10.0.1.3:11211"}, + {"520056", "10.0.1.2:11211"}, + {"520289", "10.0.1.8:11211"}, + {"520522", "10.0.1.5:11211"}, + {"533337", "10.0.1.3:11211"}, + {"533570", "10.0.1.3:11211"}, + {"533803", "10.0.1.2:11211"}, + {"534036", "10.0.1.4:11211"}, + {"534269", "10.0.1.1:11211"}, + {"534502", "10.0.1.2:11211"}, + {"534735", "10.0.1.4:11211"}, + {"534968", "10.0.1.8:11211"}, + {"535201", "10.0.1.1:11211"}, + {"535434", "10.0.1.1:11211"}, + {"535667", "10.0.1.3:11211"}, + {"535900", "10.0.1.7:11211"}, + {"536133", "10.0.1.8:11211"}, + {"541026", "10.0.1.1:11211"}, + {"541259", "10.0.1.2:11211"}, + {"541492", "10.0.1.6:11211"}, + {"541725", "10.0.1.8:11211"}, + {"541958", "10.0.1.1:11211"}, + {"542191", "10.0.1.6:11211"}, + {"542424", "10.0.1.7:11211"}, + {"542657", "10.0.1.2:11211"}, + {"542890", "10.0.1.4:11211"}, + {"543123", "10.0.1.5:11211"}, + {"543356", "10.0.1.3:11211"}, + {"543589", "10.0.1.7:11211"}, + {"547084", "10.0.1.2:11211"}, + {"547317", "10.0.1.3:11211"}, + {"547550", "10.0.1.7:11211"}, + {"547783", "10.0.1.7:11211"}, + {"548016", "10.0.1.2:11211"}, + {"548249", "10.0.1.5:11211"}, + {"548482", "10.0.1.7:11211"}, + {"548715", "10.0.1.6:11211"}, + {"548948", "10.0.1.4:11211"}, + {"549181", 
"10.0.1.3:11211"}, + {"549414", "10.0.1.3:11211"}, + {"549647", "10.0.1.4:11211"}, + {"549880", "10.0.1.7:11211"}, + {"550113", "10.0.1.6:11211"}, + {"550346", "10.0.1.4:11211"}, + {"550579", "10.0.1.6:11211"}, + {"550812", "10.0.1.4:11211"}, + {"551045", "10.0.1.5:11211"}, + {"551278", "10.0.1.6:11211"}, + {"551511", "10.0.1.5:11211"}, + {"551744", "10.0.1.6:11211"}, + {"551977", "10.0.1.8:11211"}, + {"552210", "10.0.1.2:11211"}, + {"552443", "10.0.1.2:11211"}, + {"552676", "10.0.1.1:11211"}, + {"552909", "10.0.1.4:11211"}, + {"553142", "10.0.1.7:11211"}, + {"553375", "10.0.1.2:11211"}, + {"553608", "10.0.1.5:11211"}, + {"553841", "10.0.1.5:11211"}, + {"554074", "10.0.1.5:11211"}, + {"554307", "10.0.1.7:11211"}, + {"554540", "10.0.1.6:11211"}, + {"554773", "10.0.1.3:11211"}, + {"555006", "10.0.1.3:11211"}, + {"555239", "10.0.1.5:11211"}, + {"555472", "10.0.1.8:11211"}, + {"555705", "10.0.1.8:11211"}, + {"555938", "10.0.1.6:11211"}, + {"556171", "10.0.1.4:11211"}, + {"556404", "10.0.1.4:11211"}, + {"556637", "10.0.1.8:11211"}, + {"556870", "10.0.1.5:11211"}, + {"557103", "10.0.1.3:11211"}, + {"557336", "10.0.1.3:11211"}, + {"557569", "10.0.1.8:11211"}, + {"557802", "10.0.1.1:11211"}, + {"558035", "10.0.1.1:11211"}, + {"558268", "10.0.1.4:11211"}, + {"558501", "10.0.1.3:11211"}, + {"558734", "10.0.1.6:11211"}, + {"558967", "10.0.1.7:11211"}, + {"559200", "10.0.1.3:11211"}, + {"559433", "10.0.1.1:11211"}, + {"559666", "10.0.1.4:11211"}, + {"559899", "10.0.1.2:11211"}, + {"560132", "10.0.1.2:11211"}, + {"560365", "10.0.1.6:11211"}, + {"560598", "10.0.1.8:11211"}, + {"560831", "10.0.1.3:11211"}, + {"561064", "10.0.1.7:11211"}, + {"561297", "10.0.1.1:11211"}, + {"561530", "10.0.1.7:11211"}, + {"561763", "10.0.1.7:11211"}, + {"561996", "10.0.1.1:11211"}, + {"562229", "10.0.1.8:11211"}, + {"562462", "10.0.1.6:11211"}, + {"562695", "10.0.1.7:11211"}, + {"562928", "10.0.1.2:11211"}, + {"563161", "10.0.1.8:11211"}, + {"563394", "10.0.1.8:11211"}, + {"563627", 
"10.0.1.6:11211"}, + {"563860", "10.0.1.2:11211"}, + {"564093", "10.0.1.5:11211"}, + {"564326", "10.0.1.4:11211"}, + {"564559", "10.0.1.8:11211"}, + {"564792", "10.0.1.1:11211"}, + {"565025", "10.0.1.3:11211"}, + {"565258", "10.0.1.7:11211"}, + {"565491", "10.0.1.7:11211"}, + {"565724", "10.0.1.5:11211"}, + {"565957", "10.0.1.4:11211"}, + {"566190", "10.0.1.4:11211"}, + {"566423", "10.0.1.8:11211"}, + {"566656", "10.0.1.4:11211"}, + {"566889", "10.0.1.3:11211"}, + {"567122", "10.0.1.1:11211"}, + {"567355", "10.0.1.8:11211"}, + {"567588", "10.0.1.6:11211"}, + {"567821", "10.0.1.3:11211"}, + {"568054", "10.0.1.5:11211"}, + {"568287", "10.0.1.8:11211"}, + {"568520", "10.0.1.8:11211"}, + {"568753", "10.0.1.4:11211"}, + {"568986", "10.0.1.5:11211"}, + {"569219", "10.0.1.8:11211"}, + {"569452", "10.0.1.6:11211"}, + {"569685", "10.0.1.1:11211"}, + {"569918", "10.0.1.3:11211"}, + {"570151", "10.0.1.4:11211"}, + {"570384", "10.0.1.4:11211"}, + {"570617", "10.0.1.8:11211"}, + {"570850", "10.0.1.5:11211"}, + {"571083", "10.0.1.8:11211"}, + {"571316", "10.0.1.3:11211"}, + {"571549", "10.0.1.3:11211"}, + {"571782", "10.0.1.2:11211"}, + {"572015", "10.0.1.6:11211"}, + {"572248", "10.0.1.3:11211"}, + {"572481", "10.0.1.7:11211"}, + {"572714", "10.0.1.6:11211"}, + {"572947", "10.0.1.8:11211"}, + {"573180", "10.0.1.7:11211"}, + {"573413", "10.0.1.1:11211"}, + {"573646", "10.0.1.1:11211"}, + {"573879", "10.0.1.1:11211"}, + {"574112", "10.0.1.1:11211"}, + {"574345", "10.0.1.1:11211"}, + {"574578", "10.0.1.6:11211"}, + {"574811", "10.0.1.5:11211"}, + {"575044", "10.0.1.2:11211"}, + {"575277", "10.0.1.1:11211"}, + {"575510", "10.0.1.1:11211"}, + {"575743", "10.0.1.5:11211"}, + {"575976", "10.0.1.4:11211"}, + {"576209", "10.0.1.6:11211"}, + {"576442", "10.0.1.5:11211"}, + {"576675", "10.0.1.2:11211"}, + {"576908", "10.0.1.3:11211"}, + {"577141", "10.0.1.7:11211"}, + {"577374", "10.0.1.5:11211"}, + {"577607", "10.0.1.5:11211"}, + {"577840", "10.0.1.5:11211"}, + {"578073", 
"10.0.1.1:11211"}, + {"578306", "10.0.1.2:11211"}, + {"578539", "10.0.1.7:11211"}, + {"578772", "10.0.1.5:11211"}, + {"579005", "10.0.1.6:11211"}, + {"579238", "10.0.1.4:11211"}, + {"579471", "10.0.1.2:11211"}, + {"579704", "10.0.1.7:11211"}, + {"579937", "10.0.1.6:11211"}, + {"580170", "10.0.1.4:11211"}, + {"585063", "10.0.1.1:11211"}, + {"585296", "10.0.1.6:11211"}, + {"585529", "10.0.1.4:11211"}, + {"585762", "10.0.1.4:11211"}, + {"585995", "10.0.1.2:11211"}, + {"586228", "10.0.1.7:11211"}, + {"586461", "10.0.1.8:11211"}, + {"586694", "10.0.1.3:11211"}, + {"586927", "10.0.1.7:11211"}, + {"587160", "10.0.1.4:11211"}, + {"587393", "10.0.1.8:11211"}, + {"587626", "10.0.1.7:11211"}, + {"587859", "10.0.1.7:11211"}, + {"588092", "10.0.1.1:11211"}, + {"588325", "10.0.1.8:11211"}, + {"588558", "10.0.1.3:11211"}, + {"588791", "10.0.1.1:11211"}, + {"589024", "10.0.1.5:11211"}, + {"589257", "10.0.1.4:11211"}, + {"589490", "10.0.1.1:11211"}, + {"589723", "10.0.1.8:11211"}, + {"589956", "10.0.1.8:11211"}, + {"590189", "10.0.1.1:11211"}, + {"590422", "10.0.1.8:11211"}, + {"590655", "10.0.1.4:11211"}, + {"590888", "10.0.1.8:11211"}, + {"591121", "10.0.1.1:11211"}, + {"591354", "10.0.1.6:11211"}, + {"591587", "10.0.1.7:11211"}, + {"591820", "10.0.1.2:11211"}, + {"592053", "10.0.1.7:11211"}, + {"592286", "10.0.1.8:11211"}, + {"592519", "10.0.1.8:11211"}, + {"592752", "10.0.1.5:11211"}, + {"592985", "10.0.1.5:11211"}, + {"593218", "10.0.1.5:11211"}, + {"593451", "10.0.1.8:11211"}, + {"593684", "10.0.1.2:11211"}, + {"593917", "10.0.1.3:11211"}, + {"594150", "10.0.1.5:11211"}, + {"594383", "10.0.1.8:11211"}, + {"594616", "10.0.1.8:11211"}, + {"594849", "10.0.1.2:11211"}, + {"595082", "10.0.1.3:11211"}, + {"595315", "10.0.1.7:11211"}, + {"595548", "10.0.1.8:11211"}, + {"595781", "10.0.1.2:11211"}, + {"596014", "10.0.1.5:11211"}, + {"596247", "10.0.1.3:11211"}, + {"596480", "10.0.1.3:11211"}, + {"596713", "10.0.1.3:11211"}, + {"596946", "10.0.1.8:11211"}, + {"597179", 
"10.0.1.1:11211"}, + {"597412", "10.0.1.4:11211"}, + {"597645", "10.0.1.3:11211"}, + {"597878", "10.0.1.4:11211"}, + {"598111", "10.0.1.3:11211"}, + {"598344", "10.0.1.3:11211"}, + {"598577", "10.0.1.7:11211"}, + {"598810", "10.0.1.3:11211"}, + {"599043", "10.0.1.1:11211"}, + {"599276", "10.0.1.7:11211"}, + {"599509", "10.0.1.5:11211"}, + {"599742", "10.0.1.7:11211"}, + {"599975", "10.0.1.3:11211"}, + {"600208", "10.0.1.4:11211"}, + {"600441", "10.0.1.1:11211"}, + {"600674", "10.0.1.8:11211"}, + {"600907", "10.0.1.6:11211"}, + {"601140", "10.0.1.7:11211"}, + {"601373", "10.0.1.2:11211"}, + {"601606", "10.0.1.5:11211"}, + {"601839", "10.0.1.6:11211"}, + {"602072", "10.0.1.2:11211"}, + {"602305", "10.0.1.3:11211"}, + {"602538", "10.0.1.3:11211"}, + {"602771", "10.0.1.3:11211"}, + {"603004", "10.0.1.3:11211"}, + {"603237", "10.0.1.8:11211"}, + {"603470", "10.0.1.5:11211"}, + {"603703", "10.0.1.7:11211"}, + {"603936", "10.0.1.4:11211"}, + {"604169", "10.0.1.7:11211"}, + {"604402", "10.0.1.2:11211"}, + {"604635", "10.0.1.3:11211"}, + {"604868", "10.0.1.5:11211"}, + {"605101", "10.0.1.5:11211"}, + {"614887", "10.0.1.2:11211"}, + {"615120", "10.0.1.2:11211"}, + {"615353", "10.0.1.8:11211"}, + {"615586", "10.0.1.6:11211"}, + {"615819", "10.0.1.5:11211"}, + {"616052", "10.0.1.3:11211"}, + {"616285", "10.0.1.1:11211"}, + {"616518", "10.0.1.8:11211"}, + {"616751", "10.0.1.5:11211"}, + {"616984", "10.0.1.8:11211"}, + {"617217", "10.0.1.3:11211"}, + {"617450", "10.0.1.4:11211"}, + {"617683", "10.0.1.1:11211"}, + {"617916", "10.0.1.7:11211"}, + {"618149", "10.0.1.8:11211"}, + {"618382", "10.0.1.7:11211"}, + {"618615", "10.0.1.7:11211"}, + {"618848", "10.0.1.4:11211"}, + {"619081", "10.0.1.1:11211"}, + {"619314", "10.0.1.8:11211"}, + {"619547", "10.0.1.3:11211"}, + {"619780", "10.0.1.1:11211"}, + {"620013", "10.0.1.7:11211"}, + {"620246", "10.0.1.5:11211"}, + {"620479", "10.0.1.3:11211"}, + {"620712", "10.0.1.7:11211"}, + {"625139", "10.0.1.1:11211"}, + {"625372", 
"10.0.1.8:11211"}, + {"625605", "10.0.1.7:11211"}, + {"625838", "10.0.1.2:11211"}, + {"626071", "10.0.1.5:11211"}, + {"626304", "10.0.1.3:11211"}, + {"626537", "10.0.1.5:11211"}, + {"626770", "10.0.1.1:11211"}, + {"627003", "10.0.1.8:11211"}, + {"627236", "10.0.1.4:11211"}, + {"627469", "10.0.1.4:11211"}, + {"627702", "10.0.1.8:11211"}, + {"627935", "10.0.1.1:11211"}, + {"628168", "10.0.1.6:11211"}, + {"628401", "10.0.1.4:11211"}, + {"628634", "10.0.1.4:11211"}, + {"628867", "10.0.1.3:11211"}, + {"629100", "10.0.1.5:11211"}, + {"629333", "10.0.1.4:11211"}, + {"629566", "10.0.1.4:11211"}, + {"629799", "10.0.1.2:11211"}, + {"630032", "10.0.1.5:11211"}, + {"630265", "10.0.1.8:11211"}, + {"630498", "10.0.1.3:11211"}, + {"630731", "10.0.1.6:11211"}, + {"630964", "10.0.1.6:11211"}, + {"631197", "10.0.1.2:11211"}, + {"631430", "10.0.1.3:11211"}, + {"631663", "10.0.1.7:11211"}, + {"631896", "10.0.1.4:11211"}, + {"632129", "10.0.1.2:11211"}, + {"632362", "10.0.1.7:11211"}, + {"632595", "10.0.1.6:11211"}, + {"632828", "10.0.1.3:11211"}, + {"633061", "10.0.1.8:11211"}, + {"633294", "10.0.1.5:11211"}, + {"633527", "10.0.1.3:11211"}, + {"633760", "10.0.1.6:11211"}, + {"633993", "10.0.1.7:11211"}, + {"634226", "10.0.1.2:11211"}, + {"634459", "10.0.1.1:11211"}, + {"634692", "10.0.1.2:11211"}, + {"634925", "10.0.1.6:11211"}, + {"635158", "10.0.1.4:11211"}, + {"635391", "10.0.1.4:11211"}, + {"635624", "10.0.1.4:11211"}, + {"635857", "10.0.1.5:11211"}, + {"636090", "10.0.1.2:11211"}, + {"636323", "10.0.1.3:11211"}, + {"636556", "10.0.1.1:11211"}, + {"636789", "10.0.1.4:11211"}, + {"637022", "10.0.1.1:11211"}, + {"637255", "10.0.1.4:11211"}, + {"637488", "10.0.1.3:11211"}, + {"637721", "10.0.1.3:11211"}, + {"637954", "10.0.1.2:11211"}, + {"638187", "10.0.1.2:11211"}, + {"638420", "10.0.1.5:11211"}, + {"638653", "10.0.1.5:11211"}, + {"638886", "10.0.1.2:11211"}, + {"639119", "10.0.1.6:11211"}, + {"639352", "10.0.1.6:11211"}, + {"639585", "10.0.1.2:11211"}, + {"639818", 
"10.0.1.7:11211"}, + {"640051", "10.0.1.3:11211"}, + {"640284", "10.0.1.3:11211"}, + {"640517", "10.0.1.7:11211"}, + {"640750", "10.0.1.4:11211"}, + {"640983", "10.0.1.3:11211"}, + {"641216", "10.0.1.2:11211"}, + {"641449", "10.0.1.1:11211"}, + {"641682", "10.0.1.4:11211"}, + {"641915", "10.0.1.5:11211"}, + {"642148", "10.0.1.1:11211"}, + {"642381", "10.0.1.2:11211"}, + {"642614", "10.0.1.7:11211"}, + {"642847", "10.0.1.7:11211"}, + {"643080", "10.0.1.3:11211"}, + {"643313", "10.0.1.6:11211"}, + {"643546", "10.0.1.4:11211"}, + {"643779", "10.0.1.3:11211"}, + {"644012", "10.0.1.5:11211"}, + {"644245", "10.0.1.3:11211"}, + {"644478", "10.0.1.8:11211"}, + {"644711", "10.0.1.8:11211"}, + {"644944", "10.0.1.2:11211"}, + {"645177", "10.0.1.8:11211"}, + {"645410", "10.0.1.6:11211"}, + {"645643", "10.0.1.2:11211"}, + {"645876", "10.0.1.4:11211"}, + {"646109", "10.0.1.4:11211"}, + {"646342", "10.0.1.1:11211"}, + {"646575", "10.0.1.3:11211"}, + {"646808", "10.0.1.3:11211"}, + {"647041", "10.0.1.4:11211"}, + {"647274", "10.0.1.5:11211"}, + {"647507", "10.0.1.7:11211"}, + {"647740", "10.0.1.1:11211"}, + {"647973", "10.0.1.7:11211"}, + {"648206", "10.0.1.7:11211"}, + {"648439", "10.0.1.8:11211"}, + {"648672", "10.0.1.4:11211"}, + {"648905", "10.0.1.4:11211"}, + {"649138", "10.0.1.5:11211"}, + {"649371", "10.0.1.6:11211"}, + {"649604", "10.0.1.3:11211"}, + {"649837", "10.0.1.7:11211"}, + {"650070", "10.0.1.5:11211"}, + {"650303", "10.0.1.4:11211"}, + {"650536", "10.0.1.8:11211"}, + {"650769", "10.0.1.8:11211"}, + {"651002", "10.0.1.8:11211"}, + {"651235", "10.0.1.4:11211"}, + {"651468", "10.0.1.1:11211"}, + {"651701", "10.0.1.3:11211"}, + {"651934", "10.0.1.4:11211"}, + {"652167", "10.0.1.4:11211"}, + {"652400", "10.0.1.7:11211"}, + {"652633", "10.0.1.4:11211"}, + {"652866", "10.0.1.3:11211"}, + {"653099", "10.0.1.1:11211"}, + {"653332", "10.0.1.8:11211"}, + {"653565", "10.0.1.2:11211"}, + {"653798", "10.0.1.2:11211"}, + {"654031", "10.0.1.6:11211"}, + {"654264", 
"10.0.1.2:11211"}, + {"654497", "10.0.1.4:11211"}, + {"654730", "10.0.1.7:11211"}, + {"654963", "10.0.1.8:11211"}, + {"655196", "10.0.1.1:11211"}, + {"655429", "10.0.1.8:11211"}, + {"655662", "10.0.1.5:11211"}, + {"655895", "10.0.1.7:11211"}, + {"656128", "10.0.1.3:11211"}, + {"656361", "10.0.1.8:11211"}, + {"656594", "10.0.1.5:11211"}, + {"656827", "10.0.1.6:11211"}, + {"657060", "10.0.1.3:11211"}, + {"657293", "10.0.1.2:11211"}, + {"657526", "10.0.1.5:11211"}, + {"657759", "10.0.1.2:11211"}, + {"657992", "10.0.1.4:11211"}, + {"658225", "10.0.1.5:11211"}, + {"658458", "10.0.1.8:11211"}, + {"658691", "10.0.1.1:11211"}, + {"658924", "10.0.1.4:11211"}, + {"659157", "10.0.1.2:11211"}, + {"659390", "10.0.1.2:11211"}, + {"659623", "10.0.1.2:11211"}, + {"659856", "10.0.1.3:11211"}, + {"660089", "10.0.1.8:11211"}, + {"660322", "10.0.1.7:11211"}, + {"660555", "10.0.1.4:11211"}, + {"660788", "10.0.1.7:11211"}, + {"661021", "10.0.1.1:11211"}, + {"661254", "10.0.1.7:11211"}, + {"661487", "10.0.1.2:11211"}, + {"661720", "10.0.1.5:11211"}, + {"661953", "10.0.1.7:11211"}, + {"662186", "10.0.1.1:11211"}, + {"662419", "10.0.1.1:11211"}, + {"662652", "10.0.1.3:11211"}, + {"662885", "10.0.1.7:11211"}, + {"677564", "10.0.1.5:11211"}, + {"677797", "10.0.1.7:11211"}, + {"678030", "10.0.1.6:11211"}, + {"678263", "10.0.1.8:11211"}, + {"678496", "10.0.1.8:11211"}, + {"678729", "10.0.1.6:11211"}, + {"678962", "10.0.1.1:11211"}, + {"679195", "10.0.1.7:11211"}, + {"679428", "10.0.1.6:11211"}, + {"679661", "10.0.1.5:11211"}, + {"679894", "10.0.1.8:11211"}, + {"680127", "10.0.1.5:11211"}, + {"680360", "10.0.1.1:11211"}, + {"680593", "10.0.1.8:11211"}, + {"680826", "10.0.1.7:11211"}, + {"681059", "10.0.1.5:11211"}, + {"681292", "10.0.1.7:11211"}, + {"681525", "10.0.1.3:11211"}, + {"681758", "10.0.1.3:11211"}, + {"685952", "10.0.1.4:11211"}, + {"686185", "10.0.1.6:11211"}, + {"686418", "10.0.1.5:11211"}, + {"686651", "10.0.1.3:11211"}, + {"686884", "10.0.1.8:11211"}, + {"687117", 
"10.0.1.6:11211"}, + {"687350", "10.0.1.4:11211"}, + {"687583", "10.0.1.8:11211"}, + {"687816", "10.0.1.7:11211"}, + {"688049", "10.0.1.7:11211"}, + {"688282", "10.0.1.5:11211"}, + {"688515", "10.0.1.6:11211"}, + {"688748", "10.0.1.4:11211"}, + {"688981", "10.0.1.1:11211"}, + {"689214", "10.0.1.5:11211"}, + {"689447", "10.0.1.3:11211"}, + {"689680", "10.0.1.8:11211"}, + {"689913", "10.0.1.8:11211"}, + {"690146", "10.0.1.6:11211"}, + {"690379", "10.0.1.8:11211"}, + {"690612", "10.0.1.2:11211"}, + {"690845", "10.0.1.6:11211"}, + {"691078", "10.0.1.2:11211"}, + {"691311", "10.0.1.7:11211"}, + {"691544", "10.0.1.8:11211"}, + {"691777", "10.0.1.5:11211"}, + {"692010", "10.0.1.7:11211"}, + {"692243", "10.0.1.4:11211"}, + {"692476", "10.0.1.3:11211"}, + {"692709", "10.0.1.3:11211"}, + {"692942", "10.0.1.1:11211"}, + {"693175", "10.0.1.5:11211"}, + {"693408", "10.0.1.5:11211"}, + {"693641", "10.0.1.6:11211"}, + {"693874", "10.0.1.3:11211"}, + {"694107", "10.0.1.3:11211"}, + {"694340", "10.0.1.5:11211"}, + {"694573", "10.0.1.4:11211"}, + {"694806", "10.0.1.6:11211"}, + {"695039", "10.0.1.1:11211"}, + {"695272", "10.0.1.6:11211"}, + {"695505", "10.0.1.3:11211"}, + {"695738", "10.0.1.6:11211"}, + {"695971", "10.0.1.8:11211"}, + {"696204", "10.0.1.2:11211"}, + {"696437", "10.0.1.1:11211"}, + {"696670", "10.0.1.6:11211"}, + {"696903", "10.0.1.7:11211"}, + {"697136", "10.0.1.2:11211"}, + {"697369", "10.0.1.6:11211"}, + {"697602", "10.0.1.2:11211"}, + {"697835", "10.0.1.4:11211"}, + {"698068", "10.0.1.8:11211"}, + {"698301", "10.0.1.5:11211"}, + {"698534", "10.0.1.4:11211"}, + {"698767", "10.0.1.6:11211"}, + {"699000", "10.0.1.7:11211"}, + {"699233", "10.0.1.4:11211"}, + {"699466", "10.0.1.5:11211"}, + {"699699", "10.0.1.3:11211"}, + {"699932", "10.0.1.2:11211"}, + {"700165", "10.0.1.6:11211"}, + {"700398", "10.0.1.1:11211"}, + {"700631", "10.0.1.1:11211"}, + {"700864", "10.0.1.3:11211"}, + {"701097", "10.0.1.4:11211"}, + {"701330", "10.0.1.7:11211"}, + {"701563", 
"10.0.1.6:11211"}, + {"701796", "10.0.1.8:11211"}, + {"702029", "10.0.1.4:11211"}, + {"702262", "10.0.1.4:11211"}, + {"702495", "10.0.1.5:11211"}, + {"702728", "10.0.1.7:11211"}, + {"702961", "10.0.1.4:11211"}, + {"703194", "10.0.1.5:11211"}, + {"703427", "10.0.1.7:11211"}, + {"703660", "10.0.1.1:11211"}, + {"703893", "10.0.1.6:11211"}, + {"704126", "10.0.1.4:11211"}, + {"704359", "10.0.1.1:11211"}, + {"704592", "10.0.1.4:11211"}, + {"704825", "10.0.1.2:11211"}, + {"705058", "10.0.1.8:11211"}, + {"705291", "10.0.1.6:11211"}, + {"705524", "10.0.1.3:11211"}, + {"705757", "10.0.1.7:11211"}, + {"705990", "10.0.1.7:11211"}, + {"706223", "10.0.1.8:11211"}, + {"706456", "10.0.1.7:11211"}, + {"706689", "10.0.1.8:11211"}, + {"706922", "10.0.1.7:11211"}, + {"707155", "10.0.1.8:11211"}, + {"707388", "10.0.1.4:11211"}, + {"707621", "10.0.1.8:11211"}, + {"707854", "10.0.1.8:11211"}, + {"708087", "10.0.1.7:11211"}, + {"708320", "10.0.1.5:11211"}, + {"708553", "10.0.1.6:11211"}, + {"708786", "10.0.1.3:11211"}, + {"709019", "10.0.1.7:11211"}, + {"709252", "10.0.1.4:11211"}, + {"709485", "10.0.1.8:11211"}, + {"709718", "10.0.1.1:11211"}, + {"709951", "10.0.1.3:11211"}, + {"710184", "10.0.1.1:11211"}, + {"710417", "10.0.1.6:11211"}, + {"710650", "10.0.1.1:11211"}, + {"710883", "10.0.1.7:11211"}, + {"711116", "10.0.1.4:11211"}, + {"711349", "10.0.1.7:11211"}, + {"711582", "10.0.1.1:11211"}, + {"711815", "10.0.1.7:11211"}, + {"712048", "10.0.1.6:11211"}, + {"712281", "10.0.1.7:11211"}, + {"712514", "10.0.1.2:11211"}, + {"712747", "10.0.1.3:11211"}, + {"712980", "10.0.1.1:11211"}, + {"713213", "10.0.1.8:11211"}, + {"713446", "10.0.1.2:11211"}, + {"713679", "10.0.1.8:11211"}, + {"713912", "10.0.1.3:11211"}, + {"714145", "10.0.1.3:11211"}, + {"714378", "10.0.1.6:11211"}, + {"714611", "10.0.1.8:11211"}, + {"714844", "10.0.1.3:11211"}, + {"715077", "10.0.1.8:11211"}, + {"715310", "10.0.1.3:11211"}, + {"715543", "10.0.1.7:11211"}, + {"715776", "10.0.1.2:11211"}, + {"716009", 
"10.0.1.5:11211"}, + {"716242", "10.0.1.7:11211"}, + {"716475", "10.0.1.8:11211"}, + {"716708", "10.0.1.7:11211"}, + {"716941", "10.0.1.6:11211"}, + {"717174", "10.0.1.3:11211"}, + {"717407", "10.0.1.2:11211"}, + {"717640", "10.0.1.3:11211"}, + {"717873", "10.0.1.3:11211"}, + {"718106", "10.0.1.2:11211"}, + {"718339", "10.0.1.8:11211"}, + {"718572", "10.0.1.5:11211"}, + {"718805", "10.0.1.5:11211"}, + {"719038", "10.0.1.8:11211"}, + {"719271", "10.0.1.5:11211"}, + {"719504", "10.0.1.3:11211"}, + {"719737", "10.0.1.5:11211"}, + {"719970", "10.0.1.6:11211"}, + {"720203", "10.0.1.7:11211"}, + {"720436", "10.0.1.2:11211"}, + {"720669", "10.0.1.7:11211"}, + {"720902", "10.0.1.7:11211"}, + {"721135", "10.0.1.2:11211"}, + {"721368", "10.0.1.1:11211"}, + {"721601", "10.0.1.2:11211"}, + {"721834", "10.0.1.3:11211"}, + {"722067", "10.0.1.5:11211"}, + {"722300", "10.0.1.8:11211"}, + {"722533", "10.0.1.1:11211"}, + {"722766", "10.0.1.4:11211"}, + {"722999", "10.0.1.8:11211"}, + {"723232", "10.0.1.8:11211"}, + {"723465", "10.0.1.8:11211"}, + {"723698", "10.0.1.2:11211"}, + {"723931", "10.0.1.6:11211"}, + {"724164", "10.0.1.5:11211"}, + {"724397", "10.0.1.8:11211"}, + {"724630", "10.0.1.5:11211"}, + {"724863", "10.0.1.4:11211"}, + {"725096", "10.0.1.4:11211"}, + {"725329", "10.0.1.8:11211"}, + {"725562", "10.0.1.1:11211"}, + {"729290", "10.0.1.7:11211"}, + {"729523", "10.0.1.1:11211"}, + {"729756", "10.0.1.1:11211"}, + {"729989", "10.0.1.5:11211"}, + {"730222", "10.0.1.3:11211"}, + {"730455", "10.0.1.3:11211"}, + {"730688", "10.0.1.6:11211"}, + {"730921", "10.0.1.1:11211"}, + {"731154", "10.0.1.4:11211"}, + {"731387", "10.0.1.1:11211"}, + {"731620", "10.0.1.7:11211"}, + {"731853", "10.0.1.3:11211"}, + {"732086", "10.0.1.7:11211"}, + {"732319", "10.0.1.7:11211"}, + {"732552", "10.0.1.6:11211"}, + {"732785", "10.0.1.2:11211"}, + {"733018", "10.0.1.5:11211"}, + {"733251", "10.0.1.8:11211"}, + {"733484", "10.0.1.3:11211"}, + {"733717", "10.0.1.5:11211"}, + {"733950", 
"10.0.1.5:11211"}, + {"734183", "10.0.1.1:11211"}, + {"734416", "10.0.1.7:11211"}, + {"734649", "10.0.1.4:11211"}, + {"734882", "10.0.1.8:11211"}, + {"735115", "10.0.1.4:11211"}, + {"735348", "10.0.1.1:11211"}, + {"735581", "10.0.1.7:11211"}, + {"735814", "10.0.1.2:11211"}, + {"736047", "10.0.1.1:11211"}, + {"736280", "10.0.1.3:11211"}, + {"736513", "10.0.1.3:11211"}, + {"736746", "10.0.1.6:11211"}, + {"736979", "10.0.1.3:11211"}, + {"737212", "10.0.1.6:11211"}, + {"737445", "10.0.1.4:11211"}, + {"737678", "10.0.1.4:11211"}, + {"737911", "10.0.1.4:11211"}, + {"740474", "10.0.1.7:11211"}, + {"740707", "10.0.1.2:11211"}, + {"740940", "10.0.1.1:11211"}, + {"741173", "10.0.1.2:11211"}, + {"741406", "10.0.1.5:11211"}, + {"741639", "10.0.1.8:11211"}, + {"741872", "10.0.1.5:11211"}, + {"742105", "10.0.1.6:11211"}, + {"742338", "10.0.1.7:11211"}, + {"742571", "10.0.1.1:11211"}, + {"742804", "10.0.1.7:11211"}, + {"743037", "10.0.1.7:11211"}, + {"743270", "10.0.1.1:11211"}, + {"743503", "10.0.1.1:11211"}, + {"743736", "10.0.1.5:11211"}, + {"743969", "10.0.1.6:11211"}, + {"744202", "10.0.1.7:11211"}, + {"744435", "10.0.1.2:11211"}, + {"744668", "10.0.1.2:11211"}, + {"744901", "10.0.1.5:11211"}, + {"745134", "10.0.1.1:11211"}, + {"745367", "10.0.1.7:11211"}, + {"745600", "10.0.1.7:11211"}, + {"745833", "10.0.1.7:11211"}, + {"746066", "10.0.1.6:11211"}, + {"746299", "10.0.1.5:11211"}, + {"746532", "10.0.1.3:11211"}, + {"746765", "10.0.1.6:11211"}, + {"746998", "10.0.1.3:11211"}, + {"747231", "10.0.1.7:11211"}, + {"747464", "10.0.1.1:11211"}, + {"747697", "10.0.1.6:11211"}, + {"747930", "10.0.1.3:11211"}, + {"748163", "10.0.1.6:11211"}, + {"748396", "10.0.1.7:11211"}, + {"748629", "10.0.1.6:11211"}, + {"748862", "10.0.1.2:11211"}, + {"749095", "10.0.1.4:11211"}, + {"749328", "10.0.1.7:11211"}, + {"749561", "10.0.1.4:11211"}, + {"749794", "10.0.1.5:11211"}, + {"750027", "10.0.1.3:11211"}, + {"750260", "10.0.1.6:11211"}, + {"750493", "10.0.1.5:11211"}, + {"750726", 
"10.0.1.4:11211"}, + {"750959", "10.0.1.1:11211"}, + {"751192", "10.0.1.1:11211"}, + {"751425", "10.0.1.6:11211"}, + {"751658", "10.0.1.3:11211"}, + {"751891", "10.0.1.6:11211"}, + {"752124", "10.0.1.6:11211"}, + {"752357", "10.0.1.5:11211"}, + {"752590", "10.0.1.3:11211"}, + {"752823", "10.0.1.2:11211"}, + {"753056", "10.0.1.1:11211"}, + {"753289", "10.0.1.7:11211"}, + {"753522", "10.0.1.8:11211"}, + {"753755", "10.0.1.3:11211"}, + {"753988", "10.0.1.2:11211"}, + {"754221", "10.0.1.8:11211"}, + {"754454", "10.0.1.1:11211"}, + {"754687", "10.0.1.7:11211"}, + {"754920", "10.0.1.6:11211"}, + {"755153", "10.0.1.2:11211"}, + {"755386", "10.0.1.1:11211"}, + {"755619", "10.0.1.7:11211"}, + {"755852", "10.0.1.3:11211"}, + {"756085", "10.0.1.5:11211"}, + {"756318", "10.0.1.6:11211"}, + {"756551", "10.0.1.3:11211"}, + {"756784", "10.0.1.7:11211"}, + {"757017", "10.0.1.7:11211"}, + {"757250", "10.0.1.2:11211"}, + {"757483", "10.0.1.2:11211"}, + {"757716", "10.0.1.3:11211"}, + {"757949", "10.0.1.2:11211"}, + {"758182", "10.0.1.3:11211"}, + {"758415", "10.0.1.4:11211"}, + {"758648", "10.0.1.6:11211"}, + {"758881", "10.0.1.6:11211"}, + {"759114", "10.0.1.5:11211"}, + {"759347", "10.0.1.8:11211"}, + {"759580", "10.0.1.8:11211"}, + {"759813", "10.0.1.1:11211"}, + {"760046", "10.0.1.1:11211"}, + {"760279", "10.0.1.3:11211"}, + {"760512", "10.0.1.6:11211"}, + {"760745", "10.0.1.8:11211"}, + {"760978", "10.0.1.6:11211"}, + {"761211", "10.0.1.4:11211"}, + {"761444", "10.0.1.7:11211"}, + {"761677", "10.0.1.1:11211"}, + {"761910", "10.0.1.4:11211"}, + {"762143", "10.0.1.7:11211"}, + {"762376", "10.0.1.7:11211"}, + {"762609", "10.0.1.2:11211"}, + {"762842", "10.0.1.6:11211"}, + {"763075", "10.0.1.7:11211"}, + {"763308", "10.0.1.3:11211"}, + {"763541", "10.0.1.7:11211"}, + {"763774", "10.0.1.5:11211"}, + {"764007", "10.0.1.3:11211"}, + {"764240", "10.0.1.6:11211"}, + {"764473", "10.0.1.4:11211"}, + {"764706", "10.0.1.3:11211"}, + {"764939", "10.0.1.2:11211"}, + {"765172", 
"10.0.1.8:11211"}, + {"765405", "10.0.1.2:11211"}, + {"765638", "10.0.1.6:11211"}, + {"765871", "10.0.1.1:11211"}, + {"766104", "10.0.1.1:11211"}, + {"766337", "10.0.1.5:11211"}, + {"766570", "10.0.1.4:11211"}, + {"766803", "10.0.1.5:11211"}, + {"767036", "10.0.1.6:11211"}, + {"767269", "10.0.1.3:11211"}, + {"767502", "10.0.1.5:11211"}, + {"767735", "10.0.1.2:11211"}, + {"767968", "10.0.1.3:11211"}, + {"768201", "10.0.1.7:11211"}, + {"777521", "10.0.1.2:11211"}, + {"777754", "10.0.1.4:11211"}, + {"777987", "10.0.1.2:11211"}, + {"778220", "10.0.1.3:11211"}, + {"778453", "10.0.1.6:11211"}, + {"778686", "10.0.1.8:11211"}, + {"778919", "10.0.1.1:11211"}, + {"779152", "10.0.1.1:11211"}, + {"779385", "10.0.1.3:11211"}, + {"779618", "10.0.1.5:11211"}, + {"779851", "10.0.1.8:11211"}, + {"780084", "10.0.1.7:11211"}, + {"780317", "10.0.1.2:11211"}, + {"780550", "10.0.1.1:11211"}, + {"780783", "10.0.1.2:11211"}, + {"781016", "10.0.1.3:11211"}, + {"781249", "10.0.1.6:11211"}, + {"781482", "10.0.1.1:11211"}, + {"781715", "10.0.1.7:11211"}, + {"781948", "10.0.1.3:11211"}, + {"782181", "10.0.1.6:11211"}, + {"782414", "10.0.1.2:11211"}, + {"782647", "10.0.1.5:11211"}, + {"782880", "10.0.1.5:11211"}, + {"783113", "10.0.1.4:11211"}, + {"783346", "10.0.1.8:11211"}, + {"783579", "10.0.1.8:11211"}, + {"783812", "10.0.1.1:11211"}, + {"784045", "10.0.1.5:11211"}, + {"784278", "10.0.1.1:11211"}, + {"784511", "10.0.1.8:11211"}, + {"784744", "10.0.1.8:11211"}, + {"784977", "10.0.1.1:11211"}, + {"785210", "10.0.1.7:11211"}, + {"785443", "10.0.1.2:11211"}, + {"785676", "10.0.1.8:11211"}, + {"785909", "10.0.1.3:11211"}, + {"786142", "10.0.1.8:11211"}, + {"786375", "10.0.1.8:11211"}, + {"786608", "10.0.1.2:11211"}, + {"786841", "10.0.1.1:11211"}, + {"787074", "10.0.1.8:11211"}, + {"787307", "10.0.1.7:11211"}, + {"787540", "10.0.1.2:11211"}, + {"787773", "10.0.1.3:11211"}, + {"788006", "10.0.1.8:11211"}, + {"788239", "10.0.1.1:11211"}, + {"788472", "10.0.1.2:11211"}, + {"788705", 
"10.0.1.6:11211"}, + {"788938", "10.0.1.7:11211"}, + {"789171", "10.0.1.7:11211"}, + {"789404", "10.0.1.2:11211"}, + {"789637", "10.0.1.8:11211"}, + {"789870", "10.0.1.3:11211"}, + {"790103", "10.0.1.8:11211"}, + {"790336", "10.0.1.6:11211"}, + {"790569", "10.0.1.5:11211"}, + {"790802", "10.0.1.6:11211"}, + {"791035", "10.0.1.7:11211"}, + {"791268", "10.0.1.7:11211"}, + {"791501", "10.0.1.7:11211"}, + {"791734", "10.0.1.8:11211"}, + {"791967", "10.0.1.6:11211"}, + {"792200", "10.0.1.3:11211"}, + {"792433", "10.0.1.6:11211"}, + {"792666", "10.0.1.7:11211"}, + {"792899", "10.0.1.2:11211"}, + {"793132", "10.0.1.1:11211"}, + {"793365", "10.0.1.5:11211"}, + {"793598", "10.0.1.2:11211"}, + {"793831", "10.0.1.6:11211"}, + {"794064", "10.0.1.3:11211"}, + {"794297", "10.0.1.1:11211"}, + {"794530", "10.0.1.1:11211"}, + {"794763", "10.0.1.7:11211"}, + {"794996", "10.0.1.6:11211"}, + {"795229", "10.0.1.5:11211"}, + {"795462", "10.0.1.1:11211"}, + {"795695", "10.0.1.8:11211"}, + {"795928", "10.0.1.4:11211"}, + {"796161", "10.0.1.3:11211"}, + {"796394", "10.0.1.7:11211"}, + {"796627", "10.0.1.5:11211"}, + {"796860", "10.0.1.8:11211"}, + {"797093", "10.0.1.7:11211"}, + {"797326", "10.0.1.3:11211"}, + {"797559", "10.0.1.3:11211"}, + {"797792", "10.0.1.8:11211"}, + {"798025", "10.0.1.4:11211"}, + {"798258", "10.0.1.3:11211"}, + {"798491", "10.0.1.3:11211"}, + {"798724", "10.0.1.8:11211"}, + {"798957", "10.0.1.2:11211"}, + {"799190", "10.0.1.5:11211"}, + {"799423", "10.0.1.2:11211"}, + {"799656", "10.0.1.6:11211"}, + {"799889", "10.0.1.5:11211"}, + {"800122", "10.0.1.5:11211"}, + {"800355", "10.0.1.3:11211"}, + {"800588", "10.0.1.6:11211"}, + {"800821", "10.0.1.5:11211"}, + {"801054", "10.0.1.3:11211"}, + {"801287", "10.0.1.2:11211"}, + {"801520", "10.0.1.2:11211"}, + {"801753", "10.0.1.8:11211"}, + {"801986", "10.0.1.2:11211"}, + {"802219", "10.0.1.1:11211"}, + {"802452", "10.0.1.2:11211"}, + {"802685", "10.0.1.7:11211"}, + {"802918", "10.0.1.8:11211"}, + {"803151", 
"10.0.1.6:11211"}, + {"803384", "10.0.1.7:11211"}, + {"803617", "10.0.1.7:11211"}, + {"803850", "10.0.1.3:11211"}, + {"804083", "10.0.1.8:11211"}, + {"804316", "10.0.1.8:11211"}, + {"804549", "10.0.1.6:11211"}, + {"804782", "10.0.1.4:11211"}, + {"805015", "10.0.1.7:11211"}, + {"805248", "10.0.1.5:11211"}, + {"805481", "10.0.1.2:11211"}, + {"805714", "10.0.1.7:11211"}, + {"805947", "10.0.1.6:11211"}, + {"806180", "10.0.1.2:11211"}, + {"806413", "10.0.1.3:11211"}, + {"806646", "10.0.1.2:11211"}, + {"806879", "10.0.1.5:11211"}, + {"807112", "10.0.1.8:11211"}, + {"807345", "10.0.1.1:11211"}, + {"807578", "10.0.1.7:11211"}, + {"807811", "10.0.1.5:11211"}, + {"808044", "10.0.1.2:11211"}, + {"808277", "10.0.1.7:11211"}, + {"808510", "10.0.1.5:11211"}, + {"808743", "10.0.1.8:11211"}, + {"811772", "10.0.1.5:11211"}, + {"812005", "10.0.1.7:11211"}, + {"812238", "10.0.1.1:11211"}, + {"812471", "10.0.1.2:11211"}, + {"815500", "10.0.1.5:11211"}, + {"815733", "10.0.1.6:11211"}, + {"815966", "10.0.1.8:11211"}, + {"816199", "10.0.1.8:11211"}, + {"816432", "10.0.1.2:11211"}, + {"816665", "10.0.1.4:11211"}, + {"816898", "10.0.1.1:11211"}, + {"817131", "10.0.1.1:11211"}, + {"817364", "10.0.1.7:11211"}, + {"817597", "10.0.1.3:11211"}, + {"817830", "10.0.1.2:11211"}, + {"818063", "10.0.1.7:11211"}, + {"818296", "10.0.1.6:11211"}, + {"818529", "10.0.1.4:11211"}, + {"818762", "10.0.1.3:11211"}, + {"818995", "10.0.1.2:11211"}, + {"819228", "10.0.1.4:11211"}, + {"819461", "10.0.1.7:11211"}, + {"819694", "10.0.1.5:11211"}, + {"819927", "10.0.1.6:11211"}, + {"820160", "10.0.1.5:11211"}, + {"820393", "10.0.1.2:11211"}, + {"820626", "10.0.1.1:11211"}, + {"820859", "10.0.1.6:11211"}, + {"821092", "10.0.1.8:11211"}, + {"821325", "10.0.1.2:11211"}, + {"821558", "10.0.1.6:11211"}, + {"821791", "10.0.1.2:11211"}, + {"822024", "10.0.1.1:11211"}, + {"822257", "10.0.1.3:11211"}, + {"822490", "10.0.1.1:11211"}, + {"822723", "10.0.1.6:11211"}, + {"822956", "10.0.1.1:11211"}, + {"823189", 
"10.0.1.7:11211"}, + {"823422", "10.0.1.2:11211"}, + {"823655", "10.0.1.3:11211"}, + {"823888", "10.0.1.7:11211"}, + {"824121", "10.0.1.2:11211"}, + {"824354", "10.0.1.6:11211"}, + {"824587", "10.0.1.7:11211"}, + {"824820", "10.0.1.8:11211"}, + {"825053", "10.0.1.5:11211"}, + {"825286", "10.0.1.4:11211"}, + {"825519", "10.0.1.8:11211"}, + {"825752", "10.0.1.8:11211"}, + {"825985", "10.0.1.8:11211"}, + {"826218", "10.0.1.8:11211"}, + {"826451", "10.0.1.4:11211"}, + {"826684", "10.0.1.7:11211"}, + {"826917", "10.0.1.5:11211"}, + {"827150", "10.0.1.3:11211"}, + {"827383", "10.0.1.1:11211"}, + {"827616", "10.0.1.8:11211"}, + {"827849", "10.0.1.1:11211"}, + {"828082", "10.0.1.7:11211"}, + {"828315", "10.0.1.7:11211"}, + {"828548", "10.0.1.4:11211"}, + {"828781", "10.0.1.7:11211"}, + {"829014", "10.0.1.5:11211"}, + {"829247", "10.0.1.7:11211"}, + {"829480", "10.0.1.2:11211"}, + {"829713", "10.0.1.4:11211"}, + {"829946", "10.0.1.2:11211"}, + {"830179", "10.0.1.1:11211"}, + {"830412", "10.0.1.3:11211"}, + {"830645", "10.0.1.6:11211"}, + {"830878", "10.0.1.7:11211"}, + {"831111", "10.0.1.1:11211"}, + {"831344", "10.0.1.7:11211"}, + {"831577", "10.0.1.7:11211"}, + {"831810", "10.0.1.1:11211"}, + {"832043", "10.0.1.8:11211"}, + {"832276", "10.0.1.3:11211"}, + {"832509", "10.0.1.3:11211"}, + {"832742", "10.0.1.6:11211"}, + {"832975", "10.0.1.4:11211"}, + {"833208", "10.0.1.1:11211"}, + {"833441", "10.0.1.4:11211"}, + {"833674", "10.0.1.7:11211"}, + {"833907", "10.0.1.3:11211"}, + {"834140", "10.0.1.4:11211"}, + {"834373", "10.0.1.3:11211"}, + {"834606", "10.0.1.7:11211"}, + {"834839", "10.0.1.3:11211"}, + {"835072", "10.0.1.4:11211"}, + {"835305", "10.0.1.5:11211"}, + {"835538", "10.0.1.3:11211"}, + {"835771", "10.0.1.3:11211"}, + {"836004", "10.0.1.6:11211"}, + {"836237", "10.0.1.7:11211"}, + {"836470", "10.0.1.1:11211"}, + {"836703", "10.0.1.7:11211"}, + {"836936", "10.0.1.2:11211"}, + {"837169", "10.0.1.4:11211"}, + {"837402", "10.0.1.2:11211"}, + {"837635", 
"10.0.1.6:11211"}, + {"837868", "10.0.1.8:11211"}, + {"838101", "10.0.1.2:11211"}, + {"838334", "10.0.1.4:11211"}, + {"838567", "10.0.1.4:11211"}, + {"838800", "10.0.1.7:11211"}, + {"839033", "10.0.1.4:11211"}, + {"839266", "10.0.1.1:11211"}, + {"839499", "10.0.1.5:11211"}, + {"839732", "10.0.1.3:11211"}, + {"839965", "10.0.1.7:11211"}, + {"840198", "10.0.1.7:11211"}, + {"840431", "10.0.1.7:11211"}, + {"840664", "10.0.1.6:11211"}, + {"840897", "10.0.1.1:11211"}, + {"841130", "10.0.1.7:11211"}, + {"841363", "10.0.1.2:11211"}, + {"841596", "10.0.1.6:11211"}, + {"841829", "10.0.1.7:11211"}, + {"842062", "10.0.1.4:11211"}, + {"842295", "10.0.1.7:11211"}, + {"842528", "10.0.1.7:11211"}, + {"842761", "10.0.1.2:11211"}, + {"842994", "10.0.1.7:11211"}, + {"843227", "10.0.1.7:11211"}, + {"843460", "10.0.1.5:11211"}, + {"843693", "10.0.1.1:11211"}, + {"843926", "10.0.1.5:11211"}, + {"844159", "10.0.1.1:11211"}, + {"844392", "10.0.1.6:11211"}, + {"844625", "10.0.1.1:11211"}, + {"844858", "10.0.1.5:11211"}, + {"845091", "10.0.1.8:11211"}, + {"845324", "10.0.1.6:11211"}, + {"845557", "10.0.1.8:11211"}, + {"845790", "10.0.1.4:11211"}, + {"846023", "10.0.1.1:11211"}, + {"846256", "10.0.1.2:11211"}, + {"846489", "10.0.1.8:11211"}, + {"846722", "10.0.1.4:11211"}, + {"846955", "10.0.1.2:11211"}, + {"847188", "10.0.1.6:11211"}, + {"847421", "10.0.1.1:11211"}, + {"847654", "10.0.1.2:11211"}, + {"847887", "10.0.1.1:11211"}, + {"848120", "10.0.1.8:11211"}, + {"848353", "10.0.1.6:11211"}, + {"848586", "10.0.1.6:11211"}, + {"848819", "10.0.1.2:11211"}, + {"849052", "10.0.1.3:11211"}, + {"849285", "10.0.1.1:11211"}, + {"849518", "10.0.1.5:11211"}, + {"849751", "10.0.1.3:11211"}, + {"849984", "10.0.1.6:11211"}, + {"850217", "10.0.1.2:11211"}, + {"855576", "10.0.1.8:11211"}, + {"855809", "10.0.1.2:11211"}, + {"856042", "10.0.1.7:11211"}, + {"856275", "10.0.1.6:11211"}, + {"856508", "10.0.1.7:11211"}, + {"856741", "10.0.1.3:11211"}, + {"856974", "10.0.1.2:11211"}, + {"857207", 
"10.0.1.4:11211"}, + {"857440", "10.0.1.7:11211"}, + {"857673", "10.0.1.5:11211"}, + {"857906", "10.0.1.8:11211"}, + {"858139", "10.0.1.3:11211"}, + {"858372", "10.0.1.1:11211"}, + {"858605", "10.0.1.1:11211"}, + {"858838", "10.0.1.6:11211"}, + {"859071", "10.0.1.5:11211"}, + {"859304", "10.0.1.3:11211"}, + {"859537", "10.0.1.2:11211"}, + {"859770", "10.0.1.8:11211"}, + {"860003", "10.0.1.8:11211"}, + {"860236", "10.0.1.4:11211"}, + {"860469", "10.0.1.2:11211"}, + {"860702", "10.0.1.7:11211"}, + {"860935", "10.0.1.6:11211"}, + {"861168", "10.0.1.4:11211"}, + {"861401", "10.0.1.2:11211"}, + {"861634", "10.0.1.5:11211"}, + {"861867", "10.0.1.5:11211"}, + {"862100", "10.0.1.1:11211"}, + {"862333", "10.0.1.8:11211"}, + {"862566", "10.0.1.4:11211"}, + {"862799", "10.0.1.1:11211"}, + {"863032", "10.0.1.4:11211"}, + {"863265", "10.0.1.3:11211"}, + {"863498", "10.0.1.6:11211"}, + {"863731", "10.0.1.8:11211"}, + {"863964", "10.0.1.6:11211"}, + {"864197", "10.0.1.5:11211"}, + {"864430", "10.0.1.6:11211"}, + {"864663", "10.0.1.1:11211"}, + {"864896", "10.0.1.6:11211"}, + {"865129", "10.0.1.3:11211"}, + {"865362", "10.0.1.8:11211"}, + {"865595", "10.0.1.4:11211"}, + {"865828", "10.0.1.8:11211"}, + {"866061", "10.0.1.5:11211"}, + {"866294", "10.0.1.3:11211"}, + {"866527", "10.0.1.5:11211"}, + {"866760", "10.0.1.3:11211"}, + {"866993", "10.0.1.2:11211"}, + {"867226", "10.0.1.1:11211"}, + {"867459", "10.0.1.1:11211"}, + {"867692", "10.0.1.6:11211"}, + {"867925", "10.0.1.6:11211"}, + {"868158", "10.0.1.4:11211"}, + {"868391", "10.0.1.8:11211"}, + {"868624", "10.0.1.3:11211"}, + {"868857", "10.0.1.5:11211"}, + {"869090", "10.0.1.4:11211"}, + {"869323", "10.0.1.7:11211"}, + {"869556", "10.0.1.1:11211"}, + {"869789", "10.0.1.2:11211"}, + {"870022", "10.0.1.1:11211"}, + {"870255", "10.0.1.4:11211"}, + {"870488", "10.0.1.7:11211"}, + {"870721", "10.0.1.1:11211"}, + {"870954", "10.0.1.7:11211"}, + {"871187", "10.0.1.1:11211"}, + {"871420", "10.0.1.4:11211"}, + {"871653", 
"10.0.1.4:11211"}, + {"871886", "10.0.1.8:11211"}, + {"872119", "10.0.1.6:11211"}, + {"872352", "10.0.1.6:11211"}, + {"872585", "10.0.1.1:11211"}, + {"872818", "10.0.1.3:11211"}, + {"873051", "10.0.1.4:11211"}, + {"873284", "10.0.1.8:11211"}, + {"873517", "10.0.1.3:11211"}, + {"873750", "10.0.1.8:11211"}, + {"877711", "10.0.1.6:11211"}, + {"877944", "10.0.1.6:11211"}, + {"878177", "10.0.1.6:11211"}, + {"878410", "10.0.1.3:11211"}, + {"878643", "10.0.1.6:11211"}, + {"878876", "10.0.1.5:11211"}, + {"879109", "10.0.1.4:11211"}, + {"879342", "10.0.1.7:11211"}, + {"879575", "10.0.1.6:11211"}, + {"879808", "10.0.1.1:11211"}, + {"880041", "10.0.1.7:11211"}, + {"880274", "10.0.1.8:11211"}, + {"880507", "10.0.1.2:11211"}, + {"880740", "10.0.1.7:11211"}, + {"880973", "10.0.1.5:11211"}, + {"881206", "10.0.1.8:11211"}, + {"881439", "10.0.1.6:11211"}, + {"881672", "10.0.1.6:11211"}, + {"881905", "10.0.1.2:11211"}, + {"882138", "10.0.1.8:11211"}, + {"882371", "10.0.1.3:11211"}, + {"882604", "10.0.1.1:11211"}, + {"882837", "10.0.1.2:11211"}, + {"883070", "10.0.1.8:11211"}, + {"883303", "10.0.1.3:11211"}, + {"883536", "10.0.1.1:11211"}, + {"883769", "10.0.1.5:11211"}, + {"884002", "10.0.1.2:11211"}, + {"884235", "10.0.1.5:11211"}, + {"884468", "10.0.1.4:11211"}, + {"884701", "10.0.1.3:11211"}, + {"884934", "10.0.1.7:11211"}, + {"885167", "10.0.1.4:11211"}, + {"885400", "10.0.1.7:11211"}, + {"885633", "10.0.1.8:11211"}, + {"885866", "10.0.1.7:11211"}, + {"886099", "10.0.1.4:11211"}, + {"886332", "10.0.1.4:11211"}, + {"886565", "10.0.1.7:11211"}, + {"886798", "10.0.1.3:11211"}, + {"887031", "10.0.1.3:11211"}, + {"887264", "10.0.1.6:11211"}, + {"887497", "10.0.1.7:11211"}, + {"887730", "10.0.1.7:11211"}, + {"887963", "10.0.1.7:11211"}, + {"888196", "10.0.1.3:11211"}, + {"888429", "10.0.1.8:11211"}, + {"888662", "10.0.1.5:11211"}, + {"888895", "10.0.1.4:11211"}, + {"889128", "10.0.1.3:11211"}, + {"889361", "10.0.1.3:11211"}, + {"889594", "10.0.1.4:11211"}, + {"889827", 
"10.0.1.5:11211"}, + {"890060", "10.0.1.7:11211"}, + {"890293", "10.0.1.3:11211"}, + {"890526", "10.0.1.1:11211"}, + {"890759", "10.0.1.3:11211"}, + {"890992", "10.0.1.3:11211"}, + {"891225", "10.0.1.1:11211"}, + {"891458", "10.0.1.6:11211"}, + {"891691", "10.0.1.6:11211"}, + {"891924", "10.0.1.2:11211"}, + {"892157", "10.0.1.4:11211"}, + {"892390", "10.0.1.4:11211"}, + {"892623", "10.0.1.8:11211"}, + {"892856", "10.0.1.1:11211"}, + {"893089", "10.0.1.7:11211"}, + {"893322", "10.0.1.8:11211"}, + {"893555", "10.0.1.5:11211"}, + {"893788", "10.0.1.6:11211"}, + {"894021", "10.0.1.8:11211"}, + {"894254", "10.0.1.6:11211"}, + {"894487", "10.0.1.4:11211"}, + {"894720", "10.0.1.6:11211"}, + {"894953", "10.0.1.6:11211"}, + {"895186", "10.0.1.3:11211"}, + {"895419", "10.0.1.1:11211"}, + {"895652", "10.0.1.8:11211"}, + {"895885", "10.0.1.2:11211"}, + {"896118", "10.0.1.7:11211"}, + {"896351", "10.0.1.3:11211"}, + {"896584", "10.0.1.2:11211"}, + {"896817", "10.0.1.1:11211"}, + {"897050", "10.0.1.2:11211"}, + {"897283", "10.0.1.2:11211"}, + {"897516", "10.0.1.4:11211"}, + {"897749", "10.0.1.5:11211"}, + {"897982", "10.0.1.6:11211"}, + {"898215", "10.0.1.5:11211"}, + {"898448", "10.0.1.7:11211"}, + {"898681", "10.0.1.2:11211"}, + {"898914", "10.0.1.2:11211"}, + {"899147", "10.0.1.4:11211"}, + {"899380", "10.0.1.5:11211"}, + {"899613", "10.0.1.1:11211"}, + {"899846", "10.0.1.2:11211"}, + {"900079", "10.0.1.3:11211"}, + {"900312", "10.0.1.1:11211"}, + {"900545", "10.0.1.6:11211"}, + {"900778", "10.0.1.6:11211"}, + {"901011", "10.0.1.2:11211"}, + {"901244", "10.0.1.7:11211"}, + {"901477", "10.0.1.6:11211"}, + {"901710", "10.0.1.2:11211"}, + {"901943", "10.0.1.8:11211"}, + {"902176", "10.0.1.6:11211"}, + {"902409", "10.0.1.7:11211"}, + {"902642", "10.0.1.4:11211"}, + {"902875", "10.0.1.5:11211"}, + {"903108", "10.0.1.6:11211"}, + {"907535", "10.0.1.1:11211"}, + {"907768", "10.0.1.3:11211"}, + {"908001", "10.0.1.6:11211"}, + {"908234", "10.0.1.5:11211"}, + {"908467", 
"10.0.1.2:11211"}, + {"908700", "10.0.1.8:11211"}, + {"908933", "10.0.1.8:11211"}, + {"909166", "10.0.1.2:11211"}, + {"909399", "10.0.1.2:11211"}, + {"909632", "10.0.1.7:11211"}, + {"909865", "10.0.1.3:11211"}, + {"910098", "10.0.1.2:11211"}, + {"910331", "10.0.1.6:11211"}, + {"910564", "10.0.1.2:11211"}, + {"910797", "10.0.1.5:11211"}, + {"911030", "10.0.1.8:11211"}, + {"911263", "10.0.1.7:11211"}, + {"911496", "10.0.1.2:11211"}, + {"911729", "10.0.1.2:11211"}, + {"911962", "10.0.1.1:11211"}, + {"912195", "10.0.1.5:11211"}, + {"912428", "10.0.1.8:11211"}, + {"912661", "10.0.1.8:11211"}, + {"912894", "10.0.1.1:11211"}, + {"913127", "10.0.1.8:11211"}, + {"913360", "10.0.1.7:11211"}, + {"913593", "10.0.1.8:11211"}, + {"913826", "10.0.1.1:11211"}, + {"914059", "10.0.1.2:11211"}, + {"914292", "10.0.1.8:11211"}, + {"914525", "10.0.1.5:11211"}, + {"914758", "10.0.1.1:11211"}, + {"914991", "10.0.1.4:11211"}, + {"915224", "10.0.1.7:11211"}, + {"915457", "10.0.1.1:11211"}, + {"915690", "10.0.1.2:11211"}, + {"915923", "10.0.1.1:11211"}, + {"916156", "10.0.1.8:11211"}, + {"916389", "10.0.1.6:11211"}, + {"916622", "10.0.1.8:11211"}, + {"916855", "10.0.1.5:11211"}, + {"917088", "10.0.1.6:11211"}, + {"917321", "10.0.1.2:11211"}, + {"917554", "10.0.1.8:11211"}, + {"917787", "10.0.1.3:11211"}, + {"918020", "10.0.1.3:11211"}, + {"918253", "10.0.1.1:11211"}, + {"918486", "10.0.1.1:11211"}, + {"918719", "10.0.1.5:11211"}, + {"918952", "10.0.1.1:11211"}, + {"919185", "10.0.1.2:11211"}, + {"919418", "10.0.1.3:11211"}, + {"919651", "10.0.1.6:11211"}, + {"919884", "10.0.1.7:11211"}, + {"920117", "10.0.1.8:11211"}, + {"920350", "10.0.1.4:11211"}, + {"920583", "10.0.1.7:11211"}, + {"920816", "10.0.1.2:11211"}, + {"921049", "10.0.1.6:11211"}, + {"921282", "10.0.1.6:11211"}, + {"921515", "10.0.1.2:11211"}, + {"921748", "10.0.1.2:11211"}, + {"921981", "10.0.1.1:11211"}, + {"922214", "10.0.1.7:11211"}, + {"922447", "10.0.1.5:11211"}, + {"922680", "10.0.1.7:11211"}, + {"922913", 
"10.0.1.6:11211"}, + {"923146", "10.0.1.3:11211"}, + {"923379", "10.0.1.1:11211"}, + {"923612", "10.0.1.5:11211"}, + {"923845", "10.0.1.7:11211"}, + {"924078", "10.0.1.4:11211"}, + {"924311", "10.0.1.7:11211"}, + {"924544", "10.0.1.2:11211"}, + {"924777", "10.0.1.7:11211"}, + {"925010", "10.0.1.8:11211"}, + {"925243", "10.0.1.8:11211"}, + {"925476", "10.0.1.7:11211"}, + {"925709", "10.0.1.8:11211"}, + {"925942", "10.0.1.6:11211"}, + {"926175", "10.0.1.4:11211"}, + {"926408", "10.0.1.3:11211"}, + {"926641", "10.0.1.2:11211"}, + {"926874", "10.0.1.5:11211"}, + {"927107", "10.0.1.3:11211"}, + {"927340", "10.0.1.1:11211"}, + {"927573", "10.0.1.3:11211"}, + {"927806", "10.0.1.3:11211"}, + {"932699", "10.0.1.4:11211"}, + {"932932", "10.0.1.7:11211"}, + {"933165", "10.0.1.3:11211"}, + {"933398", "10.0.1.8:11211"}, + {"933631", "10.0.1.5:11211"}, + {"933864", "10.0.1.6:11211"}, + {"934097", "10.0.1.8:11211"}, + {"934330", "10.0.1.5:11211"}, + {"934563", "10.0.1.1:11211"}, + {"934796", "10.0.1.4:11211"}, + {"935029", "10.0.1.2:11211"}, + {"935262", "10.0.1.6:11211"}, + {"935495", "10.0.1.3:11211"}, + {"935728", "10.0.1.6:11211"}, + {"935961", "10.0.1.3:11211"}, + {"936194", "10.0.1.6:11211"}, + {"936427", "10.0.1.6:11211"}, + {"936660", "10.0.1.5:11211"}, + {"936893", "10.0.1.7:11211"}, + {"937126", "10.0.1.8:11211"}, + {"937359", "10.0.1.6:11211"}, + {"937592", "10.0.1.7:11211"}, + {"937825", "10.0.1.3:11211"}, + {"938058", "10.0.1.7:11211"}, + {"938291", "10.0.1.7:11211"}, + {"938524", "10.0.1.2:11211"}, + {"938757", "10.0.1.6:11211"}, + {"938990", "10.0.1.2:11211"}, + {"939223", "10.0.1.7:11211"}, + {"939456", "10.0.1.4:11211"}, + {"939689", "10.0.1.1:11211"}, + {"939922", "10.0.1.5:11211"}, + {"940155", "10.0.1.2:11211"}, + {"940388", "10.0.1.1:11211"}, + {"940621", "10.0.1.4:11211"}, + {"940854", "10.0.1.3:11211"}, + {"941087", "10.0.1.4:11211"}, + {"944815", "10.0.1.4:11211"}, + {"945048", "10.0.1.1:11211"}, + {"945281", "10.0.1.2:11211"}, + {"945514", 
"10.0.1.4:11211"}, + {"945747", "10.0.1.5:11211"}, + {"945980", "10.0.1.4:11211"}, + {"946213", "10.0.1.2:11211"}, + {"946446", "10.0.1.2:11211"}, + {"946679", "10.0.1.1:11211"}, + {"946912", "10.0.1.3:11211"}, + {"947145", "10.0.1.3:11211"}, + {"947378", "10.0.1.1:11211"}, + {"947611", "10.0.1.3:11211"}, + {"947844", "10.0.1.2:11211"}, + {"948077", "10.0.1.3:11211"}, + {"948310", "10.0.1.7:11211"}, + {"948543", "10.0.1.5:11211"}, + {"948776", "10.0.1.7:11211"}, + {"949009", "10.0.1.6:11211"}, + {"949242", "10.0.1.1:11211"}, + {"949475", "10.0.1.4:11211"}, + {"949708", "10.0.1.2:11211"}, + {"949941", "10.0.1.5:11211"}, + {"950174", "10.0.1.1:11211"}, + {"950407", "10.0.1.4:11211"}, + {"950640", "10.0.1.7:11211"}, + {"950873", "10.0.1.3:11211"}, + {"951106", "10.0.1.3:11211"}, + {"951339", "10.0.1.1:11211"}, + {"951572", "10.0.1.7:11211"}, + {"951805", "10.0.1.7:11211"}, + {"952038", "10.0.1.1:11211"}, + {"952271", "10.0.1.8:11211"}, + {"952504", "10.0.1.8:11211"}, + {"952737", "10.0.1.7:11211"}, + {"952970", "10.0.1.1:11211"}, + {"953203", "10.0.1.2:11211"}, + {"953436", "10.0.1.5:11211"}, + {"953669", "10.0.1.2:11211"}, + {"953902", "10.0.1.4:11211"}, + {"954135", "10.0.1.1:11211"}, + {"954368", "10.0.1.6:11211"}, + {"954601", "10.0.1.7:11211"}, + {"954834", "10.0.1.3:11211"}, + {"955067", "10.0.1.6:11211"}, + {"955300", "10.0.1.7:11211"}, + {"955533", "10.0.1.3:11211"}, + {"955766", "10.0.1.1:11211"}, + {"955999", "10.0.1.8:11211"}, + {"956232", "10.0.1.3:11211"}, + {"956465", "10.0.1.4:11211"}, + {"956698", "10.0.1.5:11211"}, + {"956931", "10.0.1.2:11211"}, + {"957164", "10.0.1.5:11211"}, + {"957397", "10.0.1.6:11211"}, + {"957630", "10.0.1.7:11211"}, + {"957863", "10.0.1.7:11211"}, + {"958096", "10.0.1.8:11211"}, + {"958329", "10.0.1.4:11211"}, + {"958562", "10.0.1.8:11211"}, + {"958795", "10.0.1.4:11211"}, + {"959028", "10.0.1.6:11211"}, + {"959261", "10.0.1.5:11211"}, + {"959494", "10.0.1.1:11211"}, + {"959727", "10.0.1.6:11211"}, + {"959960", 
"10.0.1.5:11211"}, + {"960193", "10.0.1.4:11211"}, + {"960426", "10.0.1.8:11211"}, + {"960659", "10.0.1.2:11211"}, + {"960892", "10.0.1.8:11211"}, + {"961125", "10.0.1.8:11211"}, + {"961358", "10.0.1.2:11211"}, + {"961591", "10.0.1.2:11211"}, + {"961824", "10.0.1.5:11211"}, + {"962057", "10.0.1.1:11211"}, + {"962290", "10.0.1.3:11211"}, + {"962523", "10.0.1.6:11211"}, + {"962756", "10.0.1.6:11211"}, + {"962989", "10.0.1.6:11211"}, + {"963222", "10.0.1.4:11211"}, + {"963455", "10.0.1.2:11211"}, + {"963688", "10.0.1.1:11211"}, + {"963921", "10.0.1.6:11211"}, + {"964154", "10.0.1.3:11211"}, + {"964387", "10.0.1.1:11211"}, + {"964620", "10.0.1.7:11211"}, + {"964853", "10.0.1.2:11211"}, + {"965086", "10.0.1.5:11211"}, + {"965319", "10.0.1.5:11211"}, + {"965552", "10.0.1.7:11211"}, + {"965785", "10.0.1.7:11211"}, + {"966018", "10.0.1.3:11211"}, + {"966251", "10.0.1.6:11211"}, + {"966484", "10.0.1.8:11211"}, + {"966717", "10.0.1.7:11211"}, + {"966950", "10.0.1.6:11211"}, + {"967183", "10.0.1.3:11211"}, + {"967416", "10.0.1.1:11211"}, + {"967649", "10.0.1.2:11211"}, + {"967882", "10.0.1.8:11211"}, + {"968115", "10.0.1.7:11211"}, + {"968348", "10.0.1.3:11211"}, + {"968581", "10.0.1.4:11211"}, + {"968814", "10.0.1.4:11211"}, + {"969047", "10.0.1.3:11211"}, + {"969280", "10.0.1.7:11211"}, + {"969513", "10.0.1.6:11211"}, + {"969746", "10.0.1.1:11211"}, + {"969979", "10.0.1.4:11211"}, + {"970212", "10.0.1.1:11211"}, + {"970445", "10.0.1.6:11211"}, + {"970678", "10.0.1.1:11211"}, + {"970911", "10.0.1.3:11211"}, + {"971144", "10.0.1.6:11211"}, + {"971377", "10.0.1.1:11211"}, + {"971610", "10.0.1.1:11211"}, + {"971843", "10.0.1.4:11211"}, + {"972076", "10.0.1.4:11211"}, + {"972309", "10.0.1.3:11211"}, + {"976037", "10.0.1.1:11211"}, + {"976270", "10.0.1.2:11211"}, + {"976503", "10.0.1.6:11211"}, + {"976736", "10.0.1.7:11211"}, + {"976969", "10.0.1.7:11211"}, + {"977202", "10.0.1.7:11211"}, + {"977435", "10.0.1.6:11211"}, + {"977668", "10.0.1.6:11211"}, + {"977901", 
"10.0.1.2:11211"}, + {"978134", "10.0.1.5:11211"}, + {"978367", "10.0.1.6:11211"}, + {"978600", "10.0.1.5:11211"}, + {"978833", "10.0.1.1:11211"}, + {"979066", "10.0.1.5:11211"}, + {"979299", "10.0.1.5:11211"}, + {"979532", "10.0.1.3:11211"}, + {"979765", "10.0.1.4:11211"}, + {"979998", "10.0.1.8:11211"}, + {"980231", "10.0.1.3:11211"}, + {"980464", "10.0.1.8:11211"}, + {"980697", "10.0.1.1:11211"}, + {"980930", "10.0.1.2:11211"}, + {"981163", "10.0.1.5:11211"}, + {"987454", "10.0.1.2:11211"}, + {"987687", "10.0.1.2:11211"}, + {"987920", "10.0.1.7:11211"}, + {"988153", "10.0.1.1:11211"}, + {"988386", "10.0.1.1:11211"}, + {"988619", "10.0.1.6:11211"}, + {"988852", "10.0.1.3:11211"}, + {"989085", "10.0.1.5:11211"}, + {"989318", "10.0.1.7:11211"}, + {"989551", "10.0.1.5:11211"}, + {"989784", "10.0.1.8:11211"}, + {"990017", "10.0.1.1:11211"}, + {"990250", "10.0.1.5:11211"}, + {"990483", "10.0.1.8:11211"}, + {"990716", "10.0.1.5:11211"}, + {"990949", "10.0.1.4:11211"}, + {"991182", "10.0.1.8:11211"}, + {"991415", "10.0.1.3:11211"}, + {"991648", "10.0.1.4:11211"}, + {"991881", "10.0.1.5:11211"}, + {"992114", "10.0.1.5:11211"}, + {"992347", "10.0.1.3:11211"}, + {"992580", "10.0.1.7:11211"}, + {"992813", "10.0.1.4:11211"}, + {"993046", "10.0.1.6:11211"}, + {"993279", "10.0.1.3:11211"}, + {"993512", "10.0.1.5:11211"}, + {"993745", "10.0.1.4:11211"}, + {"993978", "10.0.1.7:11211"}, + {"994211", "10.0.1.7:11211"}, + {"994444", "10.0.1.5:11211"}, + {"994677", "10.0.1.1:11211"}, + {"994910", "10.0.1.7:11211"}, + {"995143", "10.0.1.7:11211"}, + {"995376", "10.0.1.4:11211"}, + {"995609", "10.0.1.1:11211"}, + {"995842", "10.0.1.6:11211"}, + {"996075", "10.0.1.6:11211"}, + {"996308", "10.0.1.6:11211"}, + {"996541", "10.0.1.2:11211"}, + {"996774", "10.0.1.6:11211"}, + {"997007", "10.0.1.7:11211"}, + {"997240", "10.0.1.2:11211"}, + {"997473", "10.0.1.1:11211"}, + {"997706", "10.0.1.4:11211"}, + {"999104", "10.0.1.8:11211"}, + {"999337", "10.0.1.4:11211"}, + {"999570", 
"10.0.1.6:11211"}, + {"999803", "10.0.1.4:11211"} + }; + + for (String[] s : exp) { + String k = s[0]; + String server = s[1]; + MemcachedNode n=locator.getPrimary(k); + assertEquals("/" + server, n.getSocketAddress().toString()); + } + + } +} diff --git a/src/test/java/net/spy/memcached/ArrayModNodeLocatorTest.java b/src/test/java/net/spy/memcached/ArrayModNodeLocatorTest.java new file mode 100644 index 000000000..7e7c58dfb --- /dev/null +++ b/src/test/java/net/spy/memcached/ArrayModNodeLocatorTest.java @@ -0,0 +1,70 @@ +package net.spy.memcached; + +import java.util.Arrays; +import java.util.Collection; + +/** + * Test the ArrayModNodeLocator. + */ +public class ArrayModNodeLocatorTest extends AbstractNodeLocationCase { + + @Override + protected void setupNodes(int n) { + super.setupNodes(n); + locator=new ArrayModNodeLocator(Arrays.asList(nodes), + HashAlgorithm.NATIVE_HASH); + } + + public void testPrimary() throws Exception { + setupNodes(4); + assertSame(nodes[3], locator.getPrimary("dustin")); + assertSame(nodes[0], locator.getPrimary("x")); + assertSame(nodes[1], locator.getPrimary("y")); + } + + public void testPrimaryClone() throws Exception { + setupNodes(4); + assertEquals(nodes[3].toString(), + locator.getReadonlyCopy().getPrimary("dustin").toString()); + assertEquals(nodes[0].toString(), + locator.getReadonlyCopy().getPrimary("x").toString()); + assertEquals(nodes[1].toString(), + locator.getReadonlyCopy().getPrimary("y").toString()); + } + + public void testAll() throws Exception { + setupNodes(4); + Collection all = locator.getAll(); + assertEquals(4, all.size()); + assertTrue(all.contains(nodes[0])); + assertTrue(all.contains(nodes[1])); + assertTrue(all.contains(nodes[2])); + assertTrue(all.contains(nodes[3])); + } + + public void testAllClone() throws Exception { + setupNodes(4); + Collection all = locator.getReadonlyCopy().getAll(); + assertEquals(4, all.size()); + } + + public void testSeq1() { + setupNodes(4); + assertSequence("dustin", 0, 1, 
2); + } + + public void testSeq2() { + setupNodes(4); + assertSequence("noelani", 1, 2, 3); + } + + public void testSeqOnlyOneServer() { + setupNodes(1); + assertSequence("noelani"); + } + + public void testSeqWithTwoNodes() { + setupNodes(2); + assertSequence("dustin", 0); + } +} diff --git a/src/test/java/net/spy/memcached/AsciiCancellationTest.java b/src/test/java/net/spy/memcached/AsciiCancellationTest.java new file mode 100644 index 000000000..69c857cf0 --- /dev/null +++ b/src/test/java/net/spy/memcached/AsciiCancellationTest.java @@ -0,0 +1,8 @@ +package net.spy.memcached; + +/** + * Test cancellation in ascii protocol. + */ +public class AsciiCancellationTest extends CancellationBaseCase { + // uses defaults +} diff --git a/src/test/java/net/spy/memcached/AsciiClientTest.java b/src/test/java/net/spy/memcached/AsciiClientTest.java new file mode 100644 index 000000000..12c4b03c3 --- /dev/null +++ b/src/test/java/net/spy/memcached/AsciiClientTest.java @@ -0,0 +1,40 @@ +package net.spy.memcached; + +import java.nio.ByteBuffer; + +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.protocol.ascii.ExtensibleOperationImpl; + +/** + * This test assumes a client is running on localhost:11211. + */ +public class AsciiClientTest extends ProtocolBaseCase { + + public void testBadOperation() throws Exception { + client.addOp("x", new ExtensibleOperationImpl(new OperationCallback(){ + public void complete() { + System.err.println("Complete."); + } + + public void receivedStatus(OperationStatus s) { + System.err.println("Received a line."); + }}) { + + @Override + public void handleLine(String line) { + System.out.println("Woo! 
A line!"); + } + + @Override + public void initialize() { + setBuffer(ByteBuffer.wrap("garbage\r\n".getBytes())); + }}); + } + + @Override + protected String getExpectedVersionSource() { + return "/127.0.0.1:11211"; + } + +} diff --git a/src/test/java/net/spy/memcached/AsciiIPV6ClientTest.java b/src/test/java/net/spy/memcached/AsciiIPV6ClientTest.java new file mode 100644 index 000000000..af24f40c2 --- /dev/null +++ b/src/test/java/net/spy/memcached/AsciiIPV6ClientTest.java @@ -0,0 +1,19 @@ +package net.spy.memcached; + +/** + * Test the test protocol over IPv6. + */ +public class AsciiIPV6ClientTest extends AsciiClientTest { + + @Override + protected void initClient(ConnectionFactory cf) throws Exception { + client=new MemcachedClient(cf, + AddrUtil.getAddresses("::1:11211")); + } + + @Override + protected String getExpectedVersionSource() { + return "/0:0:0:0:0:0:0:1:11211"; + } + +} diff --git a/src/test/java/net/spy/memcached/BinaryCancellationTest.java b/src/test/java/net/spy/memcached/BinaryCancellationTest.java new file mode 100644 index 000000000..d73ff0c06 --- /dev/null +++ b/src/test/java/net/spy/memcached/BinaryCancellationTest.java @@ -0,0 +1,18 @@ +package net.spy.memcached; + +/** + * Test cancellation in the binary protocol. + */ +public class BinaryCancellationTest extends CancellationBaseCase { + + @Override + protected void initClient() throws Exception { + initClient(new BinaryConnectionFactory() { + @Override + public FailureMode getFailureMode() { + return FailureMode.Retry; + } + }); + } + +} diff --git a/src/test/java/net/spy/memcached/BinaryClientTest.java b/src/test/java/net/spy/memcached/BinaryClientTest.java new file mode 100644 index 000000000..e88f749e9 --- /dev/null +++ b/src/test/java/net/spy/memcached/BinaryClientTest.java @@ -0,0 +1,65 @@ +package net.spy.memcached; + + +/** + * This test assumes a binary server is running on localhost:11211. 
+ */ +public class BinaryClientTest extends ProtocolBaseCase { + + @Override + protected void initClient() throws Exception { + initClient(new BinaryConnectionFactory() { + @Override + public long getOperationTimeout() { + return 15000; + } + @Override + public FailureMode getFailureMode() { + return FailureMode.Retry; + } + }); + } + + @Override + protected String getExpectedVersionSource() { + return "/127.0.0.1:11211"; + } + + @Override + public void testGetStatsCacheDump() throws Exception { + // XXX: Cachedump isn't returning anything from the server in binprot + assertTrue(true); + } + + public void testCASAppendFail() throws Exception { + final String key="append.key"; + assertTrue(client.set(key, 5, "test").get()); + CASValue casv = client.gets(key); + assertFalse(client.append(casv.getCas() + 1, key, "es").get()); + assertEquals("test", client.get(key)); + } + + public void testCASAppendSuccess() throws Exception { + final String key="append.key"; + assertTrue(client.set(key, 5, "test").get()); + CASValue casv = client.gets(key); + assertTrue(client.append(casv.getCas(), key, "es").get()); + assertEquals("testes", client.get(key)); + } + + public void testCASPrependFail() throws Exception { + final String key="append.key"; + assertTrue(client.set(key, 5, "test").get()); + CASValue casv = client.gets(key); + assertFalse(client.prepend(casv.getCas() + 1, key, "es").get()); + assertEquals("test", client.get(key)); + } + + public void testCASPrependSuccess() throws Exception { + final String key="append.key"; + assertTrue(client.set(key, 5, "test").get()); + CASValue casv = client.gets(key); + assertTrue(client.prepend(casv.getCas(), key, "es").get()); + assertEquals("estest", client.get(key)); + } +} diff --git a/src/test/java/net/spy/memcached/BinaryIPV6ClientTest.java b/src/test/java/net/spy/memcached/BinaryIPV6ClientTest.java new file mode 100644 index 000000000..140c04939 --- /dev/null +++ b/src/test/java/net/spy/memcached/BinaryIPV6ClientTest.java @@ 
-0,0 +1,19 @@ +package net.spy.memcached; + +/** + * Binary IPv6 client test. + */ +public class BinaryIPV6ClientTest extends BinaryClientTest { + + @Override + protected void initClient(ConnectionFactory cf) throws Exception { + client=new MemcachedClient(cf, + AddrUtil.getAddresses("::1:11211")); + } + + @Override + protected String getExpectedVersionSource() { + return "/0:0:0:0:0:0:0:1:11211"; + } + +} diff --git a/src/test/java/net/spy/memcached/CASMutatorTest.java b/src/test/java/net/spy/memcached/CASMutatorTest.java new file mode 100644 index 000000000..73a2e2c65 --- /dev/null +++ b/src/test/java/net/spy/memcached/CASMutatorTest.java @@ -0,0 +1,69 @@ +package net.spy.memcached; + +import java.util.concurrent.Callable; + +import net.spy.memcached.compat.SyncThread; +import net.spy.memcached.transcoders.LongTranscoder; + +/** + * Test the CAS mutator. + */ +public class CASMutatorTest extends ClientBaseCase { + + private CASMutation mutation; + private CASMutator mutator; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mutation=new CASMutation() { + public Long getNewValue(Long current) { + return current+1; + } + }; + mutator=new CASMutator(client, new LongTranscoder(), 50); + } + + public void testDefaultConstructor() { + // Just validate that this doesn't throw an exception. + new CASMutator(client, new LongTranscoder()); + } + + public void testConcurrentCAS() throws Throwable { + int num=SyncThread.getDistinctResultCount(20, new Callable(){ + public Long call() throws Exception { + return mutator.cas("test.cas.concurrent", 0L, 0, mutation); + }}); + assertEquals(20, num); + } + + public void testIncorrectTypeInCAS() throws Throwable { + // Stick something for this CAS in the cache. 
+ client.set("x", 0, "not a long"); + try { + Long rv=mutator.cas("x", 1L, 0, mutation); + fail("Expected RuntimeException on invalid type mutation, got " + + rv); + } catch(RuntimeException e) { + assertEquals("Couldn't get a CAS in 50 attempts", e.getMessage()); + } + } + + public void testCASUpdateWithNullInitial() throws Throwable { + client.set("x", 0, 1L); + Long rv=mutator.cas("x", (Long)null, 0, mutation); + assertEquals(rv, (Long)2L); + } + + public void testCASUpdateWithNullInitialNoExistingVal() throws Throwable { + assertNull(client.get("x")); + Long rv=mutator.cas("x", (Long)null, 0, mutation); + assertNull(rv); + assertNull(client.get("x")); + } + + public void testCASValueToString() { + CASValue c=new CASValue(717L, "hi"); + assertEquals("{CasValue 717/hi}", c.toString()); + } +} diff --git a/src/test/java/net/spy/memcached/CacheMapTest.java b/src/test/java/net/spy/memcached/CacheMapTest.java new file mode 100644 index 000000000..8b9a8f063 --- /dev/null +++ b/src/test/java/net/spy/memcached/CacheMapTest.java @@ -0,0 +1,149 @@ +package net.spy.memcached; + +import java.lang.reflect.Field; +import java.util.HashMap; +import java.util.Map; + +import net.spy.memcached.transcoders.IntegerTranscoder; +import net.spy.memcached.transcoders.SerializingTranscoder; +import net.spy.memcached.transcoders.Transcoder; + +import org.jmock.Mock; +import org.jmock.MockObjectTestCase; + +/** + * Test the CacheMap. 
+ */ +public class CacheMapTest extends MockObjectTestCase { + + private final static int EXP = 8175; + private Mock clientMock; + private MemcachedClientIF client; + private Transcoder transcoder; + private CacheMap cacheMap; + + @Override + protected void setUp() throws Exception { + super.setUp(); + transcoder = new SerializingTranscoder(); + clientMock = mock(MemcachedClientIF.class); + clientMock.expects(once()).method("getTranscoder") + .will(returnValue(transcoder)); + client = (MemcachedClientIF) clientMock.proxy(); + cacheMap=new CacheMap(client, EXP, "blah"); + } + + private void expectGetAndReturn(String k, Object value) { + clientMock.expects(once()).method("get") + .with(eq(k), same(transcoder)) + .will(returnValue(value)); + } + + public void testNoExpConstructor() throws Exception { + clientMock.expects(once()).method("getTranscoder") + .will(returnValue(transcoder)); + + CacheMap cm = new CacheMap(client, "blah"); + Field f = BaseCacheMap.class.getDeclaredField("exp"); + f.setAccessible(true); + assertEquals(0, f.getInt(cm)); + } + + public void testBaseConstructor() throws Exception { + BaseCacheMap bcm = new BaseCacheMap(client, + EXP, "base", new IntegerTranscoder()); + Field f = BaseCacheMap.class.getDeclaredField("exp"); + f.setAccessible(true); + assertEquals(EXP, f.getInt(bcm)); + } + + public void testClear() { + try { + cacheMap.clear(); + fail("Expected unsupported operation exception"); + } catch(UnsupportedOperationException e) { + // pass + } + } + + public void testGetPositive() { + expectGetAndReturn("blaha", "something"); + assertEquals("something", cacheMap.get("a")); + } + + public void testGetNegative() { + expectGetAndReturn("blaha", null); + assertNull(cacheMap.get("a")); + } + + public void testGetNotString() { + assertNull(cacheMap.get(new Object())); + } + + public void testContainsPositive() { + expectGetAndReturn("blaha", new Object()); + assertTrue(cacheMap.containsKey("a")); + } + + public void testContainsNegative() { + 
expectGetAndReturn("blaha", null); + assertFalse(cacheMap.containsKey("a")); + } + + public void testContainsValue() { + assertFalse(cacheMap.containsValue("anything")); + } + + public void testEntrySet() { + assertEquals(0, cacheMap.entrySet().size()); + } + + public void testKeySet() { + assertEquals(0, cacheMap.keySet().size()); + } + + public void testtIsEmpty() { + assertFalse(cacheMap.isEmpty()); + } + + public void testPutAll() { + clientMock.expects(once()).method("set") + .with(eq("blaha"), eq(EXP), eq("vala")); + clientMock.expects(once()).method("set") + .with(eq("blahb"), eq(EXP), eq("valb")); + + Map m = new HashMap(); + m.put("a", "vala"); + m.put("b", "valb"); + + cacheMap.putAll(m); + } + + public void testSize() { + assertEquals(0, cacheMap.size()); + } + + public void testValues() { + assertEquals(0, cacheMap.values().size()); + } + + public void testRemove() { + expectGetAndReturn("blaha", "olda"); + clientMock.expects(once()).method("delete").with(eq("blaha")); + + assertEquals("olda", cacheMap.remove("a")); + } + + public void testRemoveNotString() { + assertNull(cacheMap.remove(new Object())); + } + + public void testPut() { + expectGetAndReturn("blaha", "olda"); + clientMock.expects(once()).method("set") + .with(eq("blaha"), eq(EXP), eq("newa")); + + assertEquals("olda", cacheMap.put("a", "newa")); + } + +} diff --git a/src/test/java/net/spy/memcached/CacheMonitorTest.java b/src/test/java/net/spy/memcached/CacheMonitorTest.java new file mode 100644 index 000000000..be702c0bb --- /dev/null +++ b/src/test/java/net/spy/memcached/CacheMonitorTest.java @@ -0,0 +1,166 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.List; + +import net.spy.memcached.CacheMonitor.CacheMonitorListener; + +import org.apache.zookeeper.KeeperException.Code; +import org.apache.zookeeper.WatchedEvent; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.Watcher.Event.EventType; +import org.apache.zookeeper.Watcher.Event.KeeperState; +import org.apache.zookeeper.ZooKeeper; +import org.jmock.Mock; +import org.jmock.MockObjectTestCase; + +public class CacheMonitorTest extends MockObjectTestCase { + + private Mock watcher; + private Mock listener; + private ZooKeeper zooKeeper; + private CacheMonitor cacheMonitor; + private List children; + + private static final String serviceCode = "dev"; + + @Override + public void setUp() throws Exception { + listener = mock(CacheMonitorListener.class); + watcher = mock(Watcher.class); + zooKeeper = new ZooKeeper("", 15000, (Watcher)watcher.proxy()); // can't mock + children = new ArrayList(); + + cacheMonitor = new CacheMonitor(zooKeeper, serviceCode, + (CacheMonitorListener) listener.proxy()); + } + + @Override + public void tearDown() throws Exception { + zooKeeper.close(); + } + + public void testProcessResult() { + // when + children.add("0.0.0.0:11211"); + listener.expects(once()).method("commandNodeChange").with(eq(children)); + + // test + cacheMonitor.processResult(Code.OK.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + + // then + assertEquals(children, cacheMonitor.prevChildren); + } + + public void 
testProcessResult_emptyChildren() { + List fakeChildren = new ArrayList(); + fakeChildren.add("0.0.0.0:23456"); + + // when : empty children + listener.expects(once()).method("commandNodeChange").with(eq(fakeChildren)); + + // test + cacheMonitor.processResult(Code.OK.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + + // then + assertEquals(fakeChildren, cacheMonitor.prevChildren); + } + + public void testProcessResult_otherEvents() { + children.add("127.0.0.1:11211"); + listener.expects(never()).method("commandNodeChange"); + + Code code; + + code = Code.NONODE; + { + cacheMonitor.processResult(code.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + // do nothing + } + + code = Code.SESSIONEXPIRED; + { + listener.expects(once()).method("closing"); + cacheMonitor.processResult(code.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + assertTrue(cacheMonitor.dead); + } + + code = Code.NOAUTH; + { + listener.expects(once()).method("closing"); + cacheMonitor.processResult(code.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + assertTrue(cacheMonitor.dead); + } + + code = Code.CONNECTIONLOSS; + { + cacheMonitor.processResult(code.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + } + + code = Code.SESSIONMOVED; + { + cacheMonitor.processResult(code.intValue(), CacheManager.CACHE_LIST_PATH + serviceCode, null, children); + } + } + + public void testProcess_syncConnected() throws Exception { + // when + WatchedEvent event = new WatchedEvent(EventType.None, KeeperState.SyncConnected, CacheManager.CACHE_LIST_PATH + "/dev"); + + // test + cacheMonitor.process(event); + + // then + // do nothing + } + + public void testProcess_disconnected() throws Exception { + // when + WatchedEvent event = new WatchedEvent(EventType.None, KeeperState.Disconnected, CacheManager.CACHE_LIST_PATH + "/dev"); + + // test + cacheMonitor.process(event); + + // then + // do 
nothing + } + + public void testProcess_expired() throws Exception { + // when + WatchedEvent event = new WatchedEvent(EventType.None, KeeperState.Expired, CacheManager.CACHE_LIST_PATH + "/dev"); + listener.expects(once()).method("closing"); + + // test + cacheMonitor.process(event); + + // then + assertTrue(cacheMonitor.dead); + } + + public void testProcess_nodeChildrenChanged() throws Exception { + // when + WatchedEvent event = new WatchedEvent(EventType.NodeChildrenChanged, KeeperState.SyncConnected, CacheManager.CACHE_LIST_PATH + "/dev"); + + // test + cacheMonitor.process(event); + + // then + // do nothing + } +} diff --git a/src/test/java/net/spy/memcached/CancelFailureModeTest.java b/src/test/java/net/spy/memcached/CancelFailureModeTest.java new file mode 100644 index 000000000..932ff4740 --- /dev/null +++ b/src/test/java/net/spy/memcached/CancelFailureModeTest.java @@ -0,0 +1,51 @@ +package net.spy.memcached; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +public class CancelFailureModeTest extends ClientBaseCase { + private String serverList; + + @Override + protected void setUp() throws Exception { + serverList="127.0.0.1:11211 127.0.0.1:11311"; + super.setUp(); + } + + @Override + protected void tearDown() throws Exception { + serverList="127.0.0.1:11211"; + super.tearDown(); + } + + @Override + protected void initClient(ConnectionFactory cf) throws Exception { + client=new MemcachedClient(cf, AddrUtil.getAddresses(serverList)); + } + + @Override + protected void initClient() throws Exception { + initClient(new DefaultConnectionFactory() { + @Override + public FailureMode getFailureMode() { + return FailureMode.Cancel; + } + }); + } + + @Override + protected void flushPause() throws InterruptedException { + Thread.sleep(100); + } + + public void testQueueingToDownServer() throws Exception { + Future f=client.add("someKey", 0, "some object"); + try { + boolean b = f.get(); + fail("Should've thrown an 
exception, returned " + b); + } catch (ExecutionException e) { + // probably OK + } + assertTrue(f.isCancelled()); + } +} diff --git a/src/test/java/net/spy/memcached/CancellationBaseCase.java b/src/test/java/net/spy/memcached/CancellationBaseCase.java new file mode 100644 index 000000000..4cdb58c9f --- /dev/null +++ b/src/test/java/net/spy/memcached/CancellationBaseCase.java @@ -0,0 +1,110 @@ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Base class for cancellation tests. + */ +public abstract class CancellationBaseCase extends ClientBaseCase { + + @Override + protected void tearDown() throws Exception { + // override teardown to avoid the flush phase + client.shutdown(); + } + + @Override + protected void initClient(ConnectionFactory cf) throws Exception { + client=new MemcachedClient(cf, + AddrUtil.getAddresses("127.0.0.1:64213")); + } + + private void tryCancellation(Future f) throws Exception { + f.cancel(true); + assertTrue(f.isCancelled()); + assertTrue(f.isDone()); + try { + Object o=f.get(); + fail("Expected cancellation, got " + o); + } catch (ExecutionException e) { + assertTrue(e.getCause() instanceof RuntimeException); + assertEquals("Cancelled", e.getCause().getMessage()); + } + } + + public void testAvailableServers() { + client.asyncGet("x"); + assertEquals(Collections.emptyList(), client.getAvailableServers()); + } + + public void testUnavailableServers() { + client.asyncGet("x"); + assertEquals(new ArrayList( + Collections.singleton("/127.0.0.1:64213")), + stringify(client.getUnavailableServers())); + } + + private void tryTimeout(Future f) throws Exception { + try { + Object o=f.get(10, TimeUnit.MILLISECONDS); + fail("Expected timeout, got " + o); + } catch(TimeoutException e) { + // expected + } + 
} + + protected void tryTestSequence(Future f) throws Exception { + tryTimeout(f); + tryCancellation(f); + } + + public void testAsyncGetCancellation() throws Exception { + tryTestSequence(client.asyncGet("k")); + } + + public void testAsyncGetsCancellation() throws Exception { + tryTestSequence(client.asyncGets("k")); + } + + public void testAsyncGetBulkCancellationCollection() throws Exception { + tryTestSequence(client.asyncGetBulk(Arrays.asList("k", "k2"))); + } + + public void testAsyncGetBulkCancellationVararg() throws Exception { + tryTestSequence(client.asyncGetBulk("k", "k2")); + } + + public void testDeleteCancellation() throws Exception { + tryTestSequence(client.delete("x")); + } + + public void testflushCancellation() throws Exception { + tryTestSequence(client.flush()); + } + + public void testDelayedflushCancellation() throws Exception { + tryTestSequence(client.flush(3)); + } + + public void testReplaceCancellation() throws Exception { + tryTestSequence(client.replace("x", 3, "y")); + } + + public void testAddCancellation() throws Exception { + tryTestSequence(client.add("x", 3, "y")); + } + + public void testSetCancellation() throws Exception { + tryTestSequence(client.set("x", 3, "y")); + } + + public void testCASCancellation() throws Exception { + tryTestSequence(client.asyncCAS("x", 3, "y")); + } +} diff --git a/src/test/java/net/spy/memcached/ClientBaseCase.java b/src/test/java/net/spy/memcached/ClientBaseCase.java new file mode 100644 index 000000000..15b3f5156 --- /dev/null +++ b/src/test/java/net/spy/memcached/ClientBaseCase.java @@ -0,0 +1,312 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; + +import junit.framework.TestCase; +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.transcoders.Transcoder; + +public abstract class ClientBaseCase extends TestCase { + + protected static String ZK_HOST = System.getProperty("ZK_HOST", + "127.0.0.1:2181"); + + protected static String ZK_SERVICE_ID = System.getProperty("ZK_SERVICE_ID", + "test"); + + protected static String ARCUS_HOST = System + .getProperty("ARCUS_HOST", + "127.0.0.1:11211"); + + protected static boolean USE_ZK = Boolean.valueOf(System.getProperty( + "USE_ZK", "false")); + + protected static boolean SHUTDOWN_AFTER_EACH_TEST = USE_ZK; + + static { + System.out.println("---------------------------------------------"); + System.out.println("[ArcusClient initialization info.]"); + System.out.println("USE_ZK=" + USE_ZK); + System.out.println("SHUTDOWN_AFTER_EACH_TEST=" + USE_ZK); + if (USE_ZK) { + System.out.println("ZK_HOST=" + ZK_HOST + ", ZK_SERVICE_ID=" + + ZK_SERVICE_ID); + } else { + System.out.println("ARCUS_HOST=" + ARCUS_HOST); + } + System.out.println("---------------------------------------------"); + } + + protected MemcachedClient client = 
null; + + private static class CFB extends ConnectionFactoryBuilder { + + private final ConnectionFactory inner; + + public CFB(ConnectionFactory cf) { + this.inner = cf; + } + + @Override + public ConnectionFactory build() { + return new ConnectionFactory() { + @Override + public MemcachedConnection createConnection( + List addrs) throws IOException { + return inner.createConnection(addrs); + } + + @Override + public MemcachedNode createMemcachedNode(SocketAddress sa, + SocketChannel c, int bufSize) { + return inner.createMemcachedNode(sa, c, bufSize); + } + + @Override + public BlockingQueue createOperationQueue() { + return inner.createOperationQueue(); + } + + @Override + public BlockingQueue createReadOperationQueue() { + return inner.createReadOperationQueue(); + } + + @Override + public BlockingQueue createWriteOperationQueue() { + return inner.createWriteOperationQueue(); + } + + @Override + public long getOpQueueMaxBlockTime() { + return inner.getOpQueueMaxBlockTime(); + } + + @Override + public NodeLocator createLocator(List nodes) { + return inner.createLocator(nodes); + } + + @Override + public OperationFactory getOperationFactory() { + return inner.getOperationFactory(); + } + + @Override + public long getOperationTimeout() { + return inner.getOperationTimeout(); + } + + @Override + public boolean isDaemon() { + return inner.isDaemon(); + } + + @Override + public boolean useNagleAlgorithm() { + return inner.useNagleAlgorithm(); + } + + @Override + public Collection getInitialObservers() { + return inner.getInitialObservers(); + } + + @Override + public FailureMode getFailureMode() { + return inner.getFailureMode(); + } + + @Override + public Transcoder getDefaultTranscoder() { + return inner.getDefaultTranscoder(); + } + + @Override + public Transcoder getDefaultCollectionTranscoder() { + return inner.getDefaultCollectionTranscoder(); + } + + @Override + public boolean shouldOptimize() { + return inner.shouldOptimize(); + } + + @Override + public int 
getReadBufSize() { + return inner.getReadBufSize(); + } + + @Override + public HashAlgorithm getHashAlg() { + return inner.getHashAlg(); + } + + @Override + public long getMaxReconnectDelay() { + return inner.getMaxReconnectDelay(); + } + + @Override + public AuthDescriptor getAuthDescriptor() { + return inner.getAuthDescriptor(); + } + + @Override + public int getTimeoutExceptionThreshold() { + return inner.getTimeoutExceptionThreshold(); + } + + @Override + public int getMaxFrontCacheElements() { + return inner.getMaxFrontCacheElements(); + } + + @Override + public int getFrontCacheExpireTime() { + return inner.getFrontCacheExpireTime(); + } + + @Override + public int getBulkServiceThreadCount() { + return inner.getBulkServiceThreadCount(); + } + + @Override + public int getBulkServiceLoopLimit() { + return inner.getBulkServiceLoopLimit(); + } + + @Override + public long getBulkServiceSingleOpTimeout() { + return inner.getBulkServiceSingleOpTimeout(); + } + + @Override + public int getDefaultMaxSMGetKeyChunkSize() { + return inner.getDefaultMaxSMGetKeyChunkSize(); + } + + @Override + public String getFrontCacheName() { + return inner.getFrontCacheName(); + } + }; + } + + @Override + public ConnectionFactoryBuilder setInitialObservers( + Collection obs) { + return this; + } + } + + protected void initClient() throws Exception { + initClient(new DefaultConnectionFactory() { + @Override + public long getOperationTimeout() { + return 15000; + } + + @Override + public FailureMode getFailureMode() { + return FailureMode.Retry; + } + }); + } + + protected void initClient(ConnectionFactory cf) throws Exception { + if (USE_ZK) { + openFromZK(new CFB(cf)); + } else { + openDirect(new CFB(cf)); + } + } + + protected void openFromZK(ConnectionFactoryBuilder cfb) { + client = ArcusClient.createArcusClient(ZK_HOST, ZK_SERVICE_ID, cfb); + } + + protected void openDirect(CFB cfb) throws Exception { + final CountDownLatch latch = new CountDownLatch( + 
ARCUS_HOST.split(",").length); + + final ConnectionObserver obs = new ConnectionObserver() { + @Override + public void connectionEstablished(SocketAddress sa, + int reconnectCount) { + latch.countDown(); + } + + @Override + public void connectionLost(SocketAddress sa) { + assert false : "Connection is failed."; + } + + }; + cfb.setInitialObservers(Collections.singleton(obs)); + + client = new ArcusClient(cfb.build(), AddrUtil.getAddresses(ARCUS_HOST)); +// latch.await(); + Thread.sleep(1000L); + } + + protected Collection stringify(Collection c) { + Collection rv = new ArrayList(); + for (Object o : c) { + rv.add(String.valueOf(o)); + } + return rv; + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + initClient(); + } + + @Override + protected void tearDown() throws Exception { + // Shut down, start up, flush, and shut down again. Error tests have + // unpredictable timing issues. + client.shutdown(); + client = null; + initClient(); + flushPause(); + assertTrue(client.flush().get()); + client.shutdown(); + client = null; + super.tearDown(); + } + + protected void flushPause() throws InterruptedException { + // nothing useful + } + +} diff --git a/src/test/java/net/spy/memcached/ConnectionFactoryBuilderTest.java b/src/test/java/net/spy/memcached/ConnectionFactoryBuilderTest.java new file mode 100644 index 000000000..a0027b725 --- /dev/null +++ b/src/test/java/net/spy/memcached/ConnectionFactoryBuilderTest.java @@ -0,0 +1,204 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.channels.SocketChannel; +import java.util.Collections; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +import net.spy.memcached.ConnectionFactoryBuilder.Locator; +import net.spy.memcached.ConnectionFactoryBuilder.Protocol; +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.auth.PlainCallbackHandler; +import net.spy.memcached.compat.BaseMockCase; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationQueueFactory; +import net.spy.memcached.protocol.ascii.AsciiMemcachedNodeImpl; +import net.spy.memcached.protocol.ascii.AsciiOperationFactory; +import net.spy.memcached.protocol.binary.BinaryMemcachedNodeImpl; +import net.spy.memcached.protocol.binary.BinaryOperationFactory; +import net.spy.memcached.transcoders.SerializingTranscoder; +import net.spy.memcached.transcoders.WhalinTranscoder; + +/** + * Test the connection factory builder. 
+ */ +public class ConnectionFactoryBuilderTest extends BaseMockCase { + + private ConnectionFactoryBuilder b; + + @Override + protected void setUp() throws Exception { + super.setUp(); + b = new ConnectionFactoryBuilder(); + } + + public void testDefaults() throws Exception { + ConnectionFactory f = b.build(); + assertEquals(DefaultConnectionFactory.DEFAULT_OPERATION_TIMEOUT, + f.getOperationTimeout()); + assertEquals(DefaultConnectionFactory.DEFAULT_READ_BUFFER_SIZE, + f.getReadBufSize()); + //assertSame(DefaultConnectionFactory.DEFAULT_HASH, f.getHashAlg()); + assertSame(HashAlgorithm.KETAMA_HASH, f.getHashAlg()); + assertTrue(f.getDefaultTranscoder() instanceof SerializingTranscoder); + //assertSame(DefaultConnectionFactory.DEFAULT_FAILURE_MODE, + // f.getFailureMode()); + assertSame(FailureMode.Cancel, f.getFailureMode()); + assertEquals(0, f.getInitialObservers().size()); + assertTrue(f.getOperationFactory() instanceof AsciiOperationFactory); + + BlockingQueue opQueue = f.createOperationQueue(); + assertTrue(opQueue instanceof ArrayBlockingQueue); + assertEquals(DefaultConnectionFactory.DEFAULT_OP_QUEUE_LEN, + opQueue.remainingCapacity()); + + BlockingQueue readOpQueue = f.createReadOperationQueue(); + assertTrue(readOpQueue instanceof LinkedBlockingQueue); + + BlockingQueue writeOpQueue = f.createWriteOperationQueue(); + assertTrue(writeOpQueue instanceof LinkedBlockingQueue); + + // This test case fails. Arcus Ketama locator builds the hash ring + // during construction. Cannot get around the failure. So, don't run... 
+ // + // ...: unexpected invocation + // Invoked: mockMemcachedNode.getSocketAddress() + /* + MemcachedNode n = (MemcachedNode)mock(MemcachedNode.class).proxy(); + assertTrue(f.createLocator(Collections.singletonList(n)) + instanceof ArrayModNodeLocator); + */ + + SocketChannel sc = SocketChannel.open(); + try { + assertTrue(f.createMemcachedNode( + InetSocketAddress.createUnresolved("localhost", 11211), + sc, 1) + instanceof AsciiMemcachedNodeImpl); + } finally { + sc.close(); + } + + assertTrue(f.isDaemon()); + assertFalse(f.shouldOptimize()); + assertFalse(f.useNagleAlgorithm()); + assertEquals(f.getOpQueueMaxBlockTime(), + DefaultConnectionFactory.DEFAULT_OP_QUEUE_MAX_BLOCK_TIME); + } + + public void testModifications() throws Exception { + ConnectionObserver testObserver = new ConnectionObserver() { + public void connectionLost(SocketAddress sa) { + // none + } + public void connectionEstablished(SocketAddress sa, int reconnectCount) { + // none + } + }; + BlockingQueue oQueue = new LinkedBlockingQueue(); + BlockingQueue rQueue = new LinkedBlockingQueue(); + BlockingQueue wQueue = new LinkedBlockingQueue(); + + OperationQueueFactory opQueueFactory = new DirectFactory(oQueue); + OperationQueueFactory rQueueFactory = new DirectFactory(rQueue); + OperationQueueFactory wQueueFactory = new DirectFactory(wQueue); + AuthDescriptor anAuthDescriptor = new AuthDescriptor(new String[]{"PLAIN"}, + new PlainCallbackHandler("username", "password")); + + ConnectionFactory f = b.setDaemon(true) + .setShouldOptimize(false) + .setFailureMode(FailureMode.Redistribute) + .setHashAlg(HashAlgorithm.KETAMA_HASH) + .setInitialObservers(Collections.singleton(testObserver)) + .setOpFact(new BinaryOperationFactory()) + .setOpTimeout(4225) + .setOpQueueFactory(opQueueFactory) + .setReadOpQueueFactory(rQueueFactory) + .setWriteOpQueueFactory(wQueueFactory) + .setReadBufferSize(19) + .setTranscoder(new WhalinTranscoder()) + .setUseNagleAlgorithm(true) + .setLocatorType(Locator.CONSISTENT) 
+ .setOpQueueMaxBlockTime(19) + .setAuthDescriptor(anAuthDescriptor) + .build(); + + assertEquals(4225, f.getOperationTimeout()); + assertEquals(19, f.getReadBufSize()); + assertSame(HashAlgorithm.KETAMA_HASH, f.getHashAlg()); + assertTrue(f.getDefaultTranscoder() instanceof WhalinTranscoder); + assertSame(FailureMode.Redistribute, f.getFailureMode()); + assertEquals(1, f.getInitialObservers().size()); + assertSame(testObserver, f.getInitialObservers().iterator().next()); + assertTrue(f.getOperationFactory() instanceof BinaryOperationFactory); + assertSame(oQueue, f.createOperationQueue()); + assertSame(rQueue, f.createReadOperationQueue()); + assertSame(wQueue, f.createWriteOperationQueue()); + assertTrue(f.isDaemon()); + assertFalse(f.shouldOptimize()); + assertTrue(f.useNagleAlgorithm()); + assertEquals(f.getOpQueueMaxBlockTime(), 19); + assertSame(anAuthDescriptor, f.getAuthDescriptor()); + + MemcachedNode n = new MockMemcachedNode( + InetSocketAddress.createUnresolved("localhost", 11211)); + assertTrue(f.createLocator(Collections.singletonList(n)) + instanceof KetamaNodeLocator); + + SocketChannel sc = SocketChannel.open(); + try { + assertTrue(f.createMemcachedNode( + InetSocketAddress.createUnresolved("localhost", 11211), + sc, 1) + instanceof BinaryMemcachedNodeImpl); + } finally { + sc.close(); + } + } + + public void testProtocolSetterBinary() { + assertTrue( + b.setProtocol(Protocol.BINARY).build().getOperationFactory() + instanceof BinaryOperationFactory); + } + + public void testProtocolSetterText() { + assertTrue( + b.setProtocol(Protocol.TEXT).build().getOperationFactory() + instanceof AsciiOperationFactory); + + } + + static class DirectFactory implements OperationQueueFactory { + private final BlockingQueue queue; + + public DirectFactory(BlockingQueue q) { + super(); + queue = q; + } + + public BlockingQueue create() { + return queue; + } + + } +} diff --git a/src/test/java/net/spy/memcached/ConnectionFactoryTest.java 
b/src/test/java/net/spy/memcached/ConnectionFactoryTest.java new file mode 100644 index 000000000..74b4f27e3 --- /dev/null +++ b/src/test/java/net/spy/memcached/ConnectionFactoryTest.java @@ -0,0 +1,33 @@ +package net.spy.memcached; + +import junit.framework.TestCase; + +/** + * Test connection factory variations. + */ +public class ConnectionFactoryTest extends TestCase { + + // These tests are a little lame. They don't verify anything other than + // that the code executes without failure. + public void testBinaryEmptyCons() { + new BinaryConnectionFactory(); + } + + public void testBinaryTwoIntCons() { + new BinaryConnectionFactory(5, 5); + } + + public void testBinaryAnIntAnotherIntAndAHashAlgorithmCons() { + new BinaryConnectionFactory(5, 5, + HashAlgorithm.FNV1_64_HASH); + } + + public void testQueueSizes() { + ConnectionFactory cf=new DefaultConnectionFactory(100, 1024); + assertEquals(100, cf.createOperationQueue().remainingCapacity()); + assertEquals(Integer.MAX_VALUE, + cf.createWriteOperationQueue().remainingCapacity()); + assertEquals(Integer.MAX_VALUE, + cf.createReadOperationQueue().remainingCapacity()); + } +} diff --git a/src/test/java/net/spy/memcached/ConsistentHashingTest.java b/src/test/java/net/spy/memcached/ConsistentHashingTest.java new file mode 100644 index 000000000..bfc228db0 --- /dev/null +++ b/src/test/java/net/spy/memcached/ConsistentHashingTest.java @@ -0,0 +1,128 @@ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; + +import junit.framework.TestCase; + +/** + */ +public class ConsistentHashingTest extends TestCase { + + public void testSmallSet() { + runThisManyNodes(3); + } + + public void testLargeSet() { + runThisManyNodes(100); + } + + /** + * Simulate dropping from (totalNodes) to (totalNodes-1). + * Ensure hashing is consistent between the the two scenarios. 
+ * @param totalNodes + */ + private void runThisManyNodes(final int totalNodes) { + final String[] stringNodes = generateAddresses(totalNodes); + + List smaller = createNodes( + AddrUtil.getAddresses(stringNodes[0])); + List larger = createNodes( + AddrUtil.getAddresses(stringNodes[1])); + + assertTrue(larger.containsAll(smaller)); + MemcachedNode oddManOut = larger.get(larger.size()-1); + assertFalse(smaller.contains(oddManOut)); + + KetamaNodeLocator lgLocator = new KetamaNodeLocator( + larger, HashAlgorithm.KETAMA_HASH); + KetamaNodeLocator smLocator = new KetamaNodeLocator( + smaller, HashAlgorithm.KETAMA_HASH); + + SortedMap lgMap = lgLocator.ketamaNodes; + SortedMap smMap = smLocator.ketamaNodes; + + // Verify that EVERY entry in the smaller map has an equivalent + // mapping in the larger map. + boolean failed = false; + for (final Long key : smMap.keySet()) { + final MemcachedNode largeNode = lgMap.get(key); + final MemcachedNode smallNode = smMap.get(key); + if (!largeNode.equals(smallNode)) { + failed = true; + System.out.println("---------------"); + System.out.println("Key: " + key); + System.out.println("Small: " + smallNode.getSocketAddress()); + System.out.println("Large: " + largeNode.getSocketAddress()); + } + } + assertFalse(failed); + + for (final Map.Entry entry : lgMap.entrySet()) { + final Long key = entry.getKey(); + final MemcachedNode node = entry.getValue(); + if (node.equals(oddManOut)) { + final MemcachedNode newNode = smLocator.getNodeForKey(key); + if (!smaller.contains(newNode)) { + System.out.println( + "Error - " + key + " -> " + newNode.getSocketAddress()); + failed = true; + } + } + } + assertFalse(failed); + + } + + private String[] generateAddresses(final int maxSize) { + final String[] results = new String[2]; + + // Generate a pseudo-random set of addresses. 
+ long now = new Date().getTime(); + int first = (int) ((now % 250) + 3); + + int second = (int) (((now/250) % 250) + 3); + + String port = ":11211 "; + int last = (int) ((now % 100) + 3); + + StringBuffer prefix = new StringBuffer(); + prefix.append(first); + prefix.append("."); + prefix.append(second); + prefix.append(".1."); + + // Don't protect the possible range too much, as we are our own client. + StringBuffer buf = new StringBuffer(); + for (int ix = 0; ix < maxSize - 1; ix++) { + buf.append(prefix); + buf.append(last+ix); + buf.append(port); + } + + results[0] = buf.toString(); + + buf.append(prefix); + buf.append(last+maxSize-1); + buf.append(port); + + results[1] = buf.toString(); + + return results; + } + + private List createNodes(List addresses) { + List results = new ArrayList(); + + for (InetSocketAddress addr : addresses) { + results.add(new MockMemcachedNode(addr)); + } + + return results; + } + +} diff --git a/src/test/java/net/spy/memcached/DoLotsOfSets.java b/src/test/java/net/spy/memcached/DoLotsOfSets.java new file mode 100644 index 000000000..2e1d8f559 --- /dev/null +++ b/src/test/java/net/spy/memcached/DoLotsOfSets.java @@ -0,0 +1,35 @@ +package net.spy.memcached; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Small test program that does a bunch of sets in a tight loop. + */ +public class DoLotsOfSets { + + public static void main(String[] args) throws Exception { + // Create a client with a queue big enough to hold the 300,000 items + // we're going to add. 
+ MemcachedClient client=new MemcachedClient( + new DefaultConnectionFactory(350000, 32768), + AddrUtil.getAddresses("localhost:11211")); + long start=System.currentTimeMillis(); + byte[] toStore=new byte[26]; + Arrays.fill(toStore, (byte)'a'); + for(int i=0; i<300000; i++) { + client.set("k" + i, 300, toStore); + } + long added=System.currentTimeMillis(); + System.err.printf("Finished queuing in %sms%n", added-start); + client.waitForQueues(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + long end=System.currentTimeMillis(); + System.err.printf("Completed everything in %sms (%sms to flush)%n", + end-start, end-added); + Map m = client.getBulk("k1", "k2", "k3", "k4", "k5", + "k299999", "k299998", "k299997", "k299996"); + assert m.size() == 9 : "Expected 9 results, got " + m; + client.shutdown(); + } +} diff --git a/src/test/java/net/spy/memcached/HashAlgorithmTest.java b/src/test/java/net/spy/memcached/HashAlgorithmTest.java new file mode 100644 index 000000000..70751dbc7 --- /dev/null +++ b/src/test/java/net/spy/memcached/HashAlgorithmTest.java @@ -0,0 +1,125 @@ +package net.spy.memcached; + +import java.util.HashMap; +import java.util.Map; + +import junit.framework.TestCase; + +/** + * Test the hash algorithms. 
+ */ +public class HashAlgorithmTest extends TestCase { + + private void assertHash(HashAlgorithm ha, String key, long exp) { + assertTrue(exp >= 0L); + // System.out.println(ha + "(" + key + ") = " + exp); + assertEquals("Invalid " + ha + " for key ``" + key + "''", + exp, ha.hash(key)); + } + + // I don't hardcode any values here because they're subject to change + private void assertNativeHash(String key) { + assertHash(HashAlgorithm.NATIVE_HASH, key, Math.abs(key.hashCode())); + } + + public void testNativeHash() { + for (String k : new String[] { "Test1", "Test2", "Test3", "Test4" }) { + assertNativeHash(k); + } + } + + public void testCrc32Hash() { + Map exp = new HashMap(); + exp.put("Test1", 19315L); + exp.put("Test2", 21114L); + exp.put("Test3", 9597L); + exp.put("Test4", 15129L); + exp.put("UDATA:edevil@sapo.pt", 558L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.CRC32_HASH, me.getKey(), me.getValue()); + } + } + + public void testFnv1_64() { + HashMap exp = new HashMap(); + exp.put("", 0x84222325L); + exp.put(" ", 0x8601b7ffL); + exp.put("hello world!", 0xb97b86bcL); + exp.put("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", + 0xe87c054aL); + exp.put("wd:com.google", 0x071b08f8L); + exp.put("wd:com.google ", 0x12f03d48L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.FNV1_64_HASH, me.getKey(), + Math.abs(me.getValue())); + } + } + + // Thanks much to pierre@demartines.com for this unit test. 
+ public void testFnv1a_64() { + HashMap exp = new HashMap(); + exp.put("", 0x84222325L); + exp.put(" ", 0x8601817fL); + exp.put("hello world!", 0xcd5a2672L); + exp.put("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", + 0xbec309a8L); + exp.put("wd:com.google", 0x097b3f26L); + exp.put("wd:com.google ", 0x1c6c1732L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.FNV1A_64_HASH, me.getKey(), + Math.abs(me.getValue())); + } + } + + public void testFnv1_32() { + HashMap exp = new HashMap(); + exp.put("", 0x811c9dc5L); + exp.put(" ", 0x050c5d3fL); + exp.put("hello world!", 0x8a01b99cL); + exp.put("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", + 0x9277524aL); + exp.put("wd:com.google", 0x455e0df8L); + exp.put("wd:com.google ", 0x2b0ffd48L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.FNV1_32_HASH, me.getKey(), + Math.abs(me.getValue())); + } + } + + public void testFnv1a_32() { + HashMap exp = new HashMap(); + exp.put("", 0x811c9dc5L); + exp.put(" ", 0x250c8f7fL); + exp.put("hello world!", 0xb034fff2L); + exp.put("Lorem ipsum dolor sit amet, consectetuer adipiscing elit.", + 0xa9795ec8L); + exp.put("wd:com.google", 0xaa90fcc6L); + exp.put("wd:com.google ", 0x683e1e12L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.FNV1A_32_HASH, me.getKey(), + Math.abs(me.getValue())); + } + } + + // These values came from libketama's test prog. 
+ public void testKetamaHash() { + HashMap exp = new HashMap(); + exp.put("26", 3979113294L); + exp.put("1404", 2065000984L); + exp.put("4177", 1125759251L); + exp.put("9315", 3302915307L); + exp.put("14745", 2580083742L); + exp.put("105106", 3986458246L); + exp.put("355107", 3611074310L); + + for (Map.Entry me : exp.entrySet()) { + assertHash(HashAlgorithm.KETAMA_HASH, me.getKey(), + Math.abs(me.getValue())); + } + } +} diff --git a/src/test/java/net/spy/memcached/KetamaConnectionFactoryTest.java b/src/test/java/net/spy/memcached/KetamaConnectionFactoryTest.java new file mode 100644 index 000000000..80f9a0d59 --- /dev/null +++ b/src/test/java/net/spy/memcached/KetamaConnectionFactoryTest.java @@ -0,0 +1,25 @@ +package net.spy.memcached; + +import java.util.ArrayList; + +import junit.framework.TestCase; + +/** + * A very basic test that the KetamaConnectionFactory returns both the correct + * hash algorithm and the correct node locator. + */ +public class KetamaConnectionFactoryTest extends TestCase { + + /* + * This *is* kinda lame, but it tests the specific differences from the + * DefaultConnectionFactory. + */ + public void testCorrectTypes() { + ConnectionFactory factory = new KetamaConnectionFactory(); + + NodeLocator locator = factory.createLocator(new ArrayList()); + assertTrue(locator instanceof KetamaNodeLocator); + + assertEquals(HashAlgorithm.KETAMA_HASH, factory.getHashAlg()); + } +} diff --git a/src/test/java/net/spy/memcached/KetamaNodeLocatorTest.java b/src/test/java/net/spy/memcached/KetamaNodeLocatorTest.java new file mode 100644 index 000000000..165689744 --- /dev/null +++ b/src/test/java/net/spy/memcached/KetamaNodeLocatorTest.java @@ -0,0 +1,3407 @@ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +/** + * Test ketama node location. 
// NOTE(review): this hunk is extraction-damaged beyond safe repair: the diff
// was collapsed onto single lines, and spans resembling markup tags were
// stripped — generic type arguments (e.g. Collection<...> in testAll/
// testAllClone) and whole code runs in setupNodes(...) and mockNodes(...)
// (everything between "for(int i=0; i" and the next surviving statement is
// missing, including the loop bound, mock expectations, and likely the
// single-arg setupNodes override). Kept byte-identical below; recover the
// missing text from the upstream spymemcached KetamaNodeLocatorTest before
// attempting to build this file.
+ */ +public class KetamaNodeLocatorTest extends AbstractNodeLocationCase { + + protected void setupNodes(HashAlgorithm alg, int n) { + super.setupNodes(n); + for(int i=0; i all = locator.getAll(); + assertEquals(4, all.size()); + for(int i=0; i<4; i++) { + assertTrue(all.contains(nodes[i])); + } + } + + public void testAllClone() throws Exception { + setupNodes(4); + + Collection all = locator.getReadonlyCopy().getAll(); + assertEquals(4, all.size()); + } + + public void testLookups() { + setupNodes(4); + assertSame(nodes[0], locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[0], locator.getPrimary("some other key")); + } + + public void testLookupsClone() { + setupNodes(4); + assertSame(nodes[0].toString(), + locator.getReadonlyCopy().getPrimary("dustin").toString()); + assertSame(nodes[2].toString(), + locator.getReadonlyCopy().getPrimary("noelani").toString()); + assertSame(nodes[0].toString(), + locator.getReadonlyCopy().getPrimary("some other key").toString()); + } + + public void testContinuumWrapping() { + setupNodes(4); + // This is the method by which I found something that would wrap + /* + String key="a"; + // maximum key found in the ketama continuum + long lastKey=((KetamaNodeLocator)locator).getMaxKey(); + while(HashAlgorithm.KETAMA_HASH.hash(key) < lastKey) { + key=PwGen.getPass(8); + } + System.out.println("Found a key past the end of the continuum: " + + key); + */ + assertEquals(4294887009L, ((KetamaNodeLocator)locator).getMaxKey()); + + assertSame(nodes[3], locator.getPrimary("V5XS8C8N")); + assertSame(nodes[3], locator.getPrimary("8KR2DKR2")); + assertSame(nodes[3], locator.getPrimary("L9KH6X4X")); + } + + public void testClusterResizing() { + setupNodes(4); + assertSame(nodes[0], locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[0], locator.getPrimary("some other key")); + + setupNodes(5); + assertSame(nodes[0], 
locator.getPrimary("dustin")); + assertSame(nodes[2], locator.getPrimary("noelani")); + assertSame(nodes[4], locator.getPrimary("some other key")); + } + + public void testSequence1() { + setupNodes(4); + assertSequence("dustin", 0, 2, 1, 2); + } + + public void testSequence2() { + setupNodes(4); + assertSequence("noelani", 2, 1, 1, 3); + } + + private void assertPosForKey(String k, int nid) { + assertSame(nodes[nid], locator.getPrimary(k)); + } + + public void testLibKetamaCompat() { + setupNodes(5); + assertPosForKey("36", 2); + assertPosForKey("10037", 3); + assertPosForKey("22051", 1); + assertPosForKey("49044", 4); + } + + public void testFNV1A_32() { + HashAlgorithm alg=HashAlgorithm.FNV1A_32_HASH; + setupNodes(alg, 5); + assertSequence("noelani", 1, 2, 2, 2, 3); + + assertSame(nodes[2], locator.getPrimary("dustin")); + assertSame(nodes[1], locator.getPrimary("noelani")); + assertSame(nodes[4], locator.getPrimary("some other key")); + } + + private MemcachedNode[] mockNodes(String servers[]) { + setupNodes(servers.length); + + for(int i=0; i a=AddrUtil.getAddresses(servers[i]); + + nodeMocks[i].expects(atLeastOnce()) + .method("getSocketAddress") + .will(returnValue(a.iterator().next())); + + } + return nodes; + } + + public void testLibKetamaCompatTwo() { + String servers[] = { + "10.0.1.1:11211", + "10.0.1.2:11211", + "10.0.1.3:11211", + "10.0.1.4:11211", + "10.0.1.5:11211", + "10.0.1.6:11211", + "10.0.1.7:11211", + "10.0.1.8:11211"}; + locator=new KetamaNodeLocator(Arrays.asList(mockNodes(servers)), + HashAlgorithm.KETAMA_HASH); + + String[][] exp = { + {"0", "10.0.1.1:11211"}, + {"233", "10.0.1.7:11211"}, + {"466", "10.0.1.3:11211"}, + {"699", "10.0.1.1:11211"}, + {"932", "10.0.1.6:11211"}, + {"1165", "10.0.1.2:11211"}, + {"1398", "10.0.1.1:11211"}, + {"1631", "10.0.1.6:11211"}, + {"1864", "10.0.1.5:11211"}, + {"2097", "10.0.1.3:11211"}, + {"2330", "10.0.1.7:11211"}, + {"2563", "10.0.1.3:11211"}, + {"2796", "10.0.1.6:11211"}, + {"3029", "10.0.1.1:11211"}, 
+ {"3262", "10.0.1.2:11211"}, + {"3495", "10.0.1.3:11211"}, + {"3728", "10.0.1.8:11211"}, + {"3961", "10.0.1.4:11211"}, + {"4194", "10.0.1.4:11211"}, + {"4427", "10.0.1.3:11211"}, + {"4660", "10.0.1.4:11211"}, + {"4893", "10.0.1.7:11211"}, + {"5126", "10.0.1.4:11211"}, + {"5359", "10.0.1.2:11211"}, + {"5592", "10.0.1.2:11211"}, + {"5825", "10.0.1.3:11211"}, + {"6058", "10.0.1.2:11211"}, + {"6291", "10.0.1.7:11211"}, + {"6524", "10.0.1.5:11211"}, + {"6757", "10.0.1.5:11211"}, + {"6990", "10.0.1.1:11211"}, + {"7223", "10.0.1.5:11211"}, + {"7456", "10.0.1.4:11211"}, + {"7689", "10.0.1.2:11211"}, + {"7922", "10.0.1.5:11211"}, + {"8155", "10.0.1.5:11211"}, + {"8388", "10.0.1.1:11211"}, + {"8621", "10.0.1.2:11211"}, + {"8854", "10.0.1.2:11211"}, + {"9087", "10.0.1.1:11211"}, + {"9320", "10.0.1.6:11211"}, + {"9553", "10.0.1.3:11211"}, + {"9786", "10.0.1.2:11211"}, + {"10019", "10.0.1.5:11211"}, + {"10252", "10.0.1.1:11211"}, + {"10485", "10.0.1.5:11211"}, + {"10718", "10.0.1.5:11211"}, + {"10951", "10.0.1.2:11211"}, + {"11184", "10.0.1.5:11211"}, + {"11417", "10.0.1.3:11211"}, + {"11650", "10.0.1.8:11211"}, + {"11883", "10.0.1.2:11211"}, + {"12116", "10.0.1.2:11211"}, + {"12349", "10.0.1.7:11211"}, + {"12582", "10.0.1.5:11211"}, + {"12815", "10.0.1.3:11211"}, + {"13048", "10.0.1.8:11211"}, + {"13281", "10.0.1.6:11211"}, + {"13514", "10.0.1.3:11211"}, + {"13747", "10.0.1.6:11211"}, + {"13980", "10.0.1.6:11211"}, + {"14213", "10.0.1.8:11211"}, + {"14446", "10.0.1.2:11211"}, + {"14679", "10.0.1.3:11211"}, + {"14912", "10.0.1.7:11211"}, + {"15145", "10.0.1.1:11211"}, + {"15378", "10.0.1.4:11211"}, + {"15611", "10.0.1.1:11211"}, + {"15844", "10.0.1.3:11211"}, + {"16077", "10.0.1.3:11211"}, + {"16310", "10.0.1.5:11211"}, + {"16543", "10.0.1.5:11211"}, + {"16776", "10.0.1.4:11211"}, + {"17009", "10.0.1.1:11211"}, + {"17242", "10.0.1.4:11211"}, + {"17475", "10.0.1.7:11211"}, + {"17708", "10.0.1.6:11211"}, + {"17941", "10.0.1.2:11211"}, + {"28892", "10.0.1.7:11211"}, + {"29125", 
"10.0.1.4:11211"}, + {"29358", "10.0.1.7:11211"}, + {"29591", "10.0.1.5:11211"}, + {"29824", "10.0.1.7:11211"}, + {"30057", "10.0.1.7:11211"}, + {"30290", "10.0.1.3:11211"}, + {"30523", "10.0.1.8:11211"}, + {"30756", "10.0.1.3:11211"}, + {"30989", "10.0.1.4:11211"}, + {"31222", "10.0.1.6:11211"}, + {"31455", "10.0.1.1:11211"}, + {"31688", "10.0.1.2:11211"}, + {"31921", "10.0.1.2:11211"}, + {"32154", "10.0.1.8:11211"}, + {"32387", "10.0.1.1:11211"}, + {"32620", "10.0.1.3:11211"}, + {"32853", "10.0.1.2:11211"}, + {"33086", "10.0.1.7:11211"}, + {"33319", "10.0.1.4:11211"}, + {"33552", "10.0.1.5:11211"}, + {"33785", "10.0.1.3:11211"}, + {"34018", "10.0.1.3:11211"}, + {"34251", "10.0.1.6:11211"}, + {"34484", "10.0.1.7:11211"}, + {"34717", "10.0.1.1:11211"}, + {"34950", "10.0.1.3:11211"}, + {"35183", "10.0.1.6:11211"}, + {"35416", "10.0.1.8:11211"}, + {"35649", "10.0.1.5:11211"}, + {"35882", "10.0.1.7:11211"}, + {"36115", "10.0.1.2:11211"}, + {"36348", "10.0.1.5:11211"}, + {"36581", "10.0.1.7:11211"}, + {"36814", "10.0.1.8:11211"}, + {"37047", "10.0.1.8:11211"}, + {"37280", "10.0.1.8:11211"}, + {"42173", "10.0.1.8:11211"}, + {"42406", "10.0.1.3:11211"}, + {"47998", "10.0.1.2:11211"}, + {"48231", "10.0.1.5:11211"}, + {"48464", "10.0.1.5:11211"}, + {"48697", "10.0.1.3:11211"}, + {"48930", "10.0.1.1:11211"}, + {"49163", "10.0.1.2:11211"}, + {"49396", "10.0.1.8:11211"}, + {"49629", "10.0.1.1:11211"}, + {"49862", "10.0.1.8:11211"}, + {"50095", "10.0.1.5:11211"}, + {"50328", "10.0.1.2:11211"}, + {"50561", "10.0.1.5:11211"}, + {"50794", "10.0.1.7:11211"}, + {"51027", "10.0.1.3:11211"}, + {"51260", "10.0.1.5:11211"}, + {"51493", "10.0.1.3:11211"}, + {"51726", "10.0.1.8:11211"}, + {"51959", "10.0.1.2:11211"}, + {"52192", "10.0.1.8:11211"}, + {"56153", "10.0.1.2:11211"}, + {"56386", "10.0.1.6:11211"}, + {"56619", "10.0.1.8:11211"}, + {"56852", "10.0.1.6:11211"}, + {"57085", "10.0.1.2:11211"}, + {"57318", "10.0.1.7:11211"}, + {"57551", "10.0.1.8:11211"}, + {"57784", 
"10.0.1.4:11211"}, + {"58017", "10.0.1.6:11211"}, + {"58250", "10.0.1.8:11211"}, + {"58483", "10.0.1.8:11211"}, + {"58716", "10.0.1.6:11211"}, + {"58949", "10.0.1.7:11211"}, + {"59182", "10.0.1.3:11211"}, + {"59415", "10.0.1.2:11211"}, + {"59648", "10.0.1.7:11211"}, + {"59881", "10.0.1.8:11211"}, + {"60114", "10.0.1.8:11211"}, + {"60347", "10.0.1.3:11211"}, + {"60580", "10.0.1.6:11211"}, + {"60813", "10.0.1.8:11211"}, + {"61046", "10.0.1.6:11211"}, + {"61279", "10.0.1.7:11211"}, + {"61512", "10.0.1.5:11211"}, + {"61745", "10.0.1.7:11211"}, + {"61978", "10.0.1.8:11211"}, + {"62211", "10.0.1.7:11211"}, + {"62444", "10.0.1.1:11211"}, + {"62677", "10.0.1.7:11211"}, + {"62910", "10.0.1.3:11211"}, + {"63143", "10.0.1.2:11211"}, + {"63376", "10.0.1.2:11211"}, + {"63609", "10.0.1.6:11211"}, + {"63842", "10.0.1.2:11211"}, + {"64075", "10.0.1.5:11211"}, + {"64308", "10.0.1.6:11211"}, + {"64541", "10.0.1.5:11211"}, + {"64774", "10.0.1.4:11211"}, + {"65007", "10.0.1.7:11211"}, + {"65240", "10.0.1.7:11211"}, + {"65473", "10.0.1.6:11211"}, + {"65706", "10.0.1.8:11211"}, + {"65939", "10.0.1.4:11211"}, + {"66172", "10.0.1.1:11211"}, + {"66405", "10.0.1.2:11211"}, + {"66638", "10.0.1.6:11211"}, + {"66871", "10.0.1.5:11211"}, + {"67104", "10.0.1.2:11211"}, + {"67337", "10.0.1.8:11211"}, + {"67570", "10.0.1.8:11211"}, + {"67803", "10.0.1.5:11211"}, + {"68036", "10.0.1.8:11211"}, + {"68269", "10.0.1.4:11211"}, + {"68502", "10.0.1.7:11211"}, + {"68735", "10.0.1.1:11211"}, + {"68968", "10.0.1.6:11211"}, + {"69201", "10.0.1.6:11211"}, + {"69434", "10.0.1.6:11211"}, + {"69667", "10.0.1.3:11211"}, + {"69900", "10.0.1.2:11211"}, + {"70133", "10.0.1.8:11211"}, + {"70366", "10.0.1.2:11211"}, + {"70599", "10.0.1.2:11211"}, + {"70832", "10.0.1.1:11211"}, + {"71065", "10.0.1.5:11211"}, + {"71298", "10.0.1.2:11211"}, + {"71531", "10.0.1.2:11211"}, + {"71764", "10.0.1.5:11211"}, + {"71997", "10.0.1.5:11211"}, + {"72230", "10.0.1.2:11211"}, + {"72463", "10.0.1.7:11211"}, + {"72696", 
"10.0.1.6:11211"}, + {"72929", "10.0.1.4:11211"}, + {"73162", "10.0.1.4:11211"}, + {"73395", "10.0.1.7:11211"}, + {"73628", "10.0.1.7:11211"}, + {"73861", "10.0.1.1:11211"}, + {"74094", "10.0.1.6:11211"}, + {"74327", "10.0.1.1:11211"}, + {"74560", "10.0.1.6:11211"}, + {"74793", "10.0.1.1:11211"}, + {"75026", "10.0.1.5:11211"}, + {"75259", "10.0.1.5:11211"}, + {"75492", "10.0.1.8:11211"}, + {"75725", "10.0.1.7:11211"}, + {"75958", "10.0.1.4:11211"}, + {"76191", "10.0.1.5:11211"}, + {"76424", "10.0.1.5:11211"}, + {"76657", "10.0.1.2:11211"}, + {"76890", "10.0.1.7:11211"}, + {"77123", "10.0.1.4:11211"}, + {"77356", "10.0.1.2:11211"}, + {"77589", "10.0.1.6:11211"}, + {"77822", "10.0.1.1:11211"}, + {"78055", "10.0.1.6:11211"}, + {"78288", "10.0.1.7:11211"}, + {"78521", "10.0.1.7:11211"}, + {"78754", "10.0.1.5:11211"}, + {"78987", "10.0.1.6:11211"}, + {"79220", "10.0.1.4:11211"}, + {"79453", "10.0.1.6:11211"}, + {"79686", "10.0.1.4:11211"}, + {"79919", "10.0.1.3:11211"}, + {"80152", "10.0.1.2:11211"}, + {"80385", "10.0.1.6:11211"}, + {"80618", "10.0.1.5:11211"}, + {"80851", "10.0.1.7:11211"}, + {"81084", "10.0.1.8:11211"}, + {"81317", "10.0.1.5:11211"}, + {"81550", "10.0.1.8:11211"}, + {"81783", "10.0.1.4:11211"}, + {"82016", "10.0.1.8:11211"}, + {"82249", "10.0.1.5:11211"}, + {"82482", "10.0.1.5:11211"}, + {"82715", "10.0.1.5:11211"}, + {"82948", "10.0.1.5:11211"}, + {"83181", "10.0.1.1:11211"}, + {"83414", "10.0.1.1:11211"}, + {"83647", "10.0.1.2:11211"}, + {"83880", "10.0.1.2:11211"}, + {"84113", "10.0.1.6:11211"}, + {"84346", "10.0.1.6:11211"}, + {"84579", "10.0.1.5:11211"}, + {"84812", "10.0.1.8:11211"}, + {"85045", "10.0.1.6:11211"}, + {"85278", "10.0.1.7:11211"}, + {"85511", "10.0.1.2:11211"}, + {"85744", "10.0.1.1:11211"}, + {"85977", "10.0.1.6:11211"}, + {"86210", "10.0.1.7:11211"}, + {"86443", "10.0.1.4:11211"}, + {"86676", "10.0.1.3:11211"}, + {"86909", "10.0.1.1:11211"}, + {"87142", "10.0.1.8:11211"}, + {"87375", "10.0.1.1:11211"}, + {"87608", 
"10.0.1.7:11211"}, + {"87841", "10.0.1.1:11211"}, + {"88074", "10.0.1.1:11211"}, + {"88307", "10.0.1.7:11211"}, + {"88540", "10.0.1.4:11211"}, + {"88773", "10.0.1.5:11211"}, + {"89006", "10.0.1.2:11211"}, + {"89239", "10.0.1.1:11211"}, + {"89472", "10.0.1.6:11211"}, + {"89705", "10.0.1.2:11211"}, + {"89938", "10.0.1.2:11211"}, + {"90171", "10.0.1.7:11211"}, + {"90404", "10.0.1.5:11211"}, + {"90637", "10.0.1.8:11211"}, + {"90870", "10.0.1.8:11211"}, + {"91103", "10.0.1.7:11211"}, + {"91336", "10.0.1.5:11211"}, + {"91569", "10.0.1.8:11211"}, + {"91802", "10.0.1.2:11211"}, + {"92035", "10.0.1.8:11211"}, + {"92268", "10.0.1.4:11211"}, + {"92501", "10.0.1.6:11211"}, + {"92734", "10.0.1.2:11211"}, + {"92967", "10.0.1.6:11211"}, + {"93200", "10.0.1.1:11211"}, + {"93433", "10.0.1.2:11211"}, + {"93666", "10.0.1.6:11211"}, + {"93899", "10.0.1.2:11211"}, + {"94132", "10.0.1.2:11211"}, + {"103685", "10.0.1.2:11211"}, + {"103918", "10.0.1.7:11211"}, + {"104151", "10.0.1.5:11211"}, + {"104384", "10.0.1.1:11211"}, + {"104617", "10.0.1.3:11211"}, + {"104850", "10.0.1.3:11211"}, + {"105083", "10.0.1.7:11211"}, + {"105316", "10.0.1.2:11211"}, + {"105549", "10.0.1.3:11211"}, + {"105782", "10.0.1.2:11211"}, + {"106015", "10.0.1.4:11211"}, + {"106248", "10.0.1.3:11211"}, + {"106481", "10.0.1.4:11211"}, + {"106714", "10.0.1.8:11211"}, + {"106947", "10.0.1.8:11211"}, + {"107180", "10.0.1.6:11211"}, + {"107413", "10.0.1.7:11211"}, + {"107646", "10.0.1.5:11211"}, + {"107879", "10.0.1.8:11211"}, + {"108112", "10.0.1.1:11211"}, + {"108345", "10.0.1.8:11211"}, + {"108578", "10.0.1.5:11211"}, + {"112539", "10.0.1.8:11211"}, + {"112772", "10.0.1.1:11211"}, + {"113005", "10.0.1.5:11211"}, + {"113238", "10.0.1.4:11211"}, + {"116034", "10.0.1.8:11211"}, + {"116267", "10.0.1.1:11211"}, + {"116500", "10.0.1.6:11211"}, + {"116733", "10.0.1.1:11211"}, + {"116966", "10.0.1.6:11211"}, + {"117199", "10.0.1.4:11211"}, + {"117432", "10.0.1.1:11211"}, + {"117665", "10.0.1.2:11211"}, + {"117898", 
"10.0.1.6:11211"}, + {"118131", "10.0.1.3:11211"}, + {"118364", "10.0.1.2:11211"}, + {"118597", "10.0.1.5:11211"}, + {"118830", "10.0.1.5:11211"}, + {"119063", "10.0.1.3:11211"}, + {"119296", "10.0.1.6:11211"}, + {"119529", "10.0.1.1:11211"}, + {"119762", "10.0.1.6:11211"}, + {"119995", "10.0.1.7:11211"}, + {"120228", "10.0.1.2:11211"}, + {"120461", "10.0.1.2:11211"}, + {"124888", "10.0.1.3:11211"}, + {"125121", "10.0.1.6:11211"}, + {"125354", "10.0.1.5:11211"}, + {"125587", "10.0.1.2:11211"}, + {"125820", "10.0.1.3:11211"}, + {"126053", "10.0.1.5:11211"}, + {"126286", "10.0.1.5:11211"}, + {"126519", "10.0.1.2:11211"}, + {"126752", "10.0.1.6:11211"}, + {"126985", "10.0.1.7:11211"}, + {"127218", "10.0.1.6:11211"}, + {"127451", "10.0.1.7:11211"}, + {"127684", "10.0.1.6:11211"}, + {"127917", "10.0.1.7:11211"}, + {"128150", "10.0.1.6:11211"}, + {"128383", "10.0.1.1:11211"}, + {"128616", "10.0.1.4:11211"}, + {"128849", "10.0.1.3:11211"}, + {"129082", "10.0.1.5:11211"}, + {"129315", "10.0.1.8:11211"}, + {"129548", "10.0.1.6:11211"}, + {"129781", "10.0.1.6:11211"}, + {"130014", "10.0.1.6:11211"}, + {"130247", "10.0.1.5:11211"}, + {"130480", "10.0.1.6:11211"}, + {"130713", "10.0.1.2:11211"}, + {"130946", "10.0.1.5:11211"}, + {"131179", "10.0.1.5:11211"}, + {"131412", "10.0.1.7:11211"}, + {"131645", "10.0.1.2:11211"}, + {"131878", "10.0.1.6:11211"}, + {"132111", "10.0.1.5:11211"}, + {"132344", "10.0.1.8:11211"}, + {"132577", "10.0.1.1:11211"}, + {"132810", "10.0.1.1:11211"}, + {"133043", "10.0.1.7:11211"}, + {"133276", "10.0.1.4:11211"}, + {"133509", "10.0.1.8:11211"}, + {"133742", "10.0.1.3:11211"}, + {"133975", "10.0.1.5:11211"}, + {"134208", "10.0.1.1:11211"}, + {"134441", "10.0.1.8:11211"}, + {"134674", "10.0.1.7:11211"}, + {"134907", "10.0.1.4:11211"}, + {"135140", "10.0.1.3:11211"}, + {"135373", "10.0.1.5:11211"}, + {"135606", "10.0.1.7:11211"}, + {"135839", "10.0.1.8:11211"}, + {"136072", "10.0.1.8:11211"}, + {"136305", "10.0.1.7:11211"}, + {"136538", 
"10.0.1.1:11211"}, + {"136771", "10.0.1.1:11211"}, + {"137004", "10.0.1.2:11211"}, + {"137237", "10.0.1.2:11211"}, + {"137470", "10.0.1.2:11211"}, + {"137703", "10.0.1.3:11211"}, + {"137936", "10.0.1.1:11211"}, + {"138169", "10.0.1.6:11211"}, + {"138402", "10.0.1.8:11211"}, + {"138635", "10.0.1.7:11211"}, + {"138868", "10.0.1.2:11211"}, + {"139101", "10.0.1.4:11211"}, + {"139334", "10.0.1.7:11211"}, + {"139567", "10.0.1.6:11211"}, + {"139800", "10.0.1.1:11211"}, + {"140033", "10.0.1.8:11211"}, + {"140266", "10.0.1.7:11211"}, + {"140499", "10.0.1.3:11211"}, + {"140732", "10.0.1.2:11211"}, + {"140965", "10.0.1.7:11211"}, + {"141198", "10.0.1.6:11211"}, + {"141431", "10.0.1.7:11211"}, + {"141664", "10.0.1.5:11211"}, + {"141897", "10.0.1.5:11211"}, + {"142130", "10.0.1.5:11211"}, + {"142363", "10.0.1.4:11211"}, + {"142596", "10.0.1.8:11211"}, + {"142829", "10.0.1.2:11211"}, + {"143062", "10.0.1.2:11211"}, + {"143295", "10.0.1.4:11211"}, + {"143528", "10.0.1.8:11211"}, + {"143761", "10.0.1.2:11211"}, + {"143994", "10.0.1.5:11211"}, + {"144227", "10.0.1.3:11211"}, + {"144460", "10.0.1.2:11211"}, + {"152149", "10.0.1.2:11211"}, + {"152382", "10.0.1.2:11211"}, + {"152615", "10.0.1.3:11211"}, + {"152848", "10.0.1.5:11211"}, + {"153081", "10.0.1.1:11211"}, + {"153314", "10.0.1.1:11211"}, + {"153547", "10.0.1.1:11211"}, + {"153780", "10.0.1.1:11211"}, + {"154013", "10.0.1.2:11211"}, + {"154246", "10.0.1.1:11211"}, + {"154479", "10.0.1.8:11211"}, + {"154712", "10.0.1.1:11211"}, + {"154945", "10.0.1.1:11211"}, + {"155178", "10.0.1.8:11211"}, + {"155411", "10.0.1.8:11211"}, + {"155644", "10.0.1.8:11211"}, + {"155877", "10.0.1.2:11211"}, + {"156110", "10.0.1.3:11211"}, + {"156343", "10.0.1.1:11211"}, + {"156576", "10.0.1.7:11211"}, + {"156809", "10.0.1.7:11211"}, + {"157042", "10.0.1.1:11211"}, + {"157275", "10.0.1.7:11211"}, + {"157508", "10.0.1.2:11211"}, + {"157741", "10.0.1.7:11211"}, + {"157974", "10.0.1.5:11211"}, + {"158207", "10.0.1.5:11211"}, + {"158440", 
"10.0.1.4:11211"}, + {"158673", "10.0.1.3:11211"}, + {"158906", "10.0.1.3:11211"}, + {"159139", "10.0.1.8:11211"}, + {"159372", "10.0.1.6:11211"}, + {"159605", "10.0.1.3:11211"}, + {"159838", "10.0.1.4:11211"}, + {"160071", "10.0.1.2:11211"}, + {"160304", "10.0.1.4:11211"}, + {"160537", "10.0.1.6:11211"}, + {"160770", "10.0.1.5:11211"}, + {"161003", "10.0.1.3:11211"}, + {"161236", "10.0.1.7:11211"}, + {"161469", "10.0.1.5:11211"}, + {"161702", "10.0.1.7:11211"}, + {"161935", "10.0.1.8:11211"}, + {"162168", "10.0.1.8:11211"}, + {"162401", "10.0.1.8:11211"}, + {"162634", "10.0.1.8:11211"}, + {"162867", "10.0.1.6:11211"}, + {"163100", "10.0.1.8:11211"}, + {"163333", "10.0.1.7:11211"}, + {"163566", "10.0.1.7:11211"}, + {"163799", "10.0.1.3:11211"}, + {"164032", "10.0.1.6:11211"}, + {"164265", "10.0.1.8:11211"}, + {"169158", "10.0.1.7:11211"}, + {"169391", "10.0.1.6:11211"}, + {"169624", "10.0.1.6:11211"}, + {"169857", "10.0.1.6:11211"}, + {"170090", "10.0.1.8:11211"}, + {"170323", "10.0.1.4:11211"}, + {"170556", "10.0.1.2:11211"}, + {"170789", "10.0.1.8:11211"}, + {"171022", "10.0.1.8:11211"}, + {"171255", "10.0.1.4:11211"}, + {"171488", "10.0.1.7:11211"}, + {"171721", "10.0.1.4:11211"}, + {"171954", "10.0.1.5:11211"}, + {"172187", "10.0.1.4:11211"}, + {"172420", "10.0.1.3:11211"}, + {"172653", "10.0.1.2:11211"}, + {"172886", "10.0.1.1:11211"}, + {"173119", "10.0.1.8:11211"}, + {"173352", "10.0.1.5:11211"}, + {"176614", "10.0.1.3:11211"}, + {"176847", "10.0.1.8:11211"}, + {"177080", "10.0.1.1:11211"}, + {"177313", "10.0.1.4:11211"}, + {"177546", "10.0.1.2:11211"}, + {"177779", "10.0.1.8:11211"}, + {"178012", "10.0.1.6:11211"}, + {"178245", "10.0.1.3:11211"}, + {"178478", "10.0.1.7:11211"}, + {"178711", "10.0.1.3:11211"}, + {"178944", "10.0.1.8:11211"}, + {"179177", "10.0.1.1:11211"}, + {"179410", "10.0.1.6:11211"}, + {"179643", "10.0.1.1:11211"}, + {"179876", "10.0.1.8:11211"}, + {"180109", "10.0.1.3:11211"}, + {"180342", "10.0.1.2:11211"}, + {"180575", 
"10.0.1.7:11211"}, + {"180808", "10.0.1.8:11211"}, + {"181041", "10.0.1.5:11211"}, + {"181274", "10.0.1.6:11211"}, + {"181507", "10.0.1.8:11211"}, + {"181740", "10.0.1.6:11211"}, + {"181973", "10.0.1.5:11211"}, + {"182206", "10.0.1.4:11211"}, + {"182439", "10.0.1.2:11211"}, + {"182672", "10.0.1.8:11211"}, + {"182905", "10.0.1.8:11211"}, + {"183138", "10.0.1.4:11211"}, + {"183371", "10.0.1.8:11211"}, + {"183604", "10.0.1.7:11211"}, + {"183837", "10.0.1.7:11211"}, + {"184070", "10.0.1.4:11211"}, + {"184303", "10.0.1.7:11211"}, + {"184536", "10.0.1.1:11211"}, + {"184769", "10.0.1.3:11211"}, + {"185002", "10.0.1.1:11211"}, + {"185235", "10.0.1.6:11211"}, + {"185468", "10.0.1.7:11211"}, + {"185701", "10.0.1.4:11211"}, + {"185934", "10.0.1.1:11211"}, + {"186167", "10.0.1.8:11211"}, + {"186400", "10.0.1.8:11211"}, + {"186633", "10.0.1.1:11211"}, + {"186866", "10.0.1.5:11211"}, + {"187099", "10.0.1.8:11211"}, + {"187332", "10.0.1.1:11211"}, + {"187565", "10.0.1.5:11211"}, + {"187798", "10.0.1.1:11211"}, + {"188031", "10.0.1.8:11211"}, + {"188264", "10.0.1.5:11211"}, + {"188497", "10.0.1.2:11211"}, + {"188730", "10.0.1.6:11211"}, + {"196419", "10.0.1.8:11211"}, + {"196652", "10.0.1.7:11211"}, + {"196885", "10.0.1.1:11211"}, + {"197118", "10.0.1.6:11211"}, + {"197351", "10.0.1.1:11211"}, + {"197584", "10.0.1.1:11211"}, + {"197817", "10.0.1.8:11211"}, + {"198050", "10.0.1.6:11211"}, + {"198283", "10.0.1.3:11211"}, + {"198516", "10.0.1.8:11211"}, + {"198749", "10.0.1.6:11211"}, + {"198982", "10.0.1.2:11211"}, + {"199215", "10.0.1.4:11211"}, + {"199448", "10.0.1.5:11211"}, + {"199681", "10.0.1.6:11211"}, + {"199914", "10.0.1.6:11211"}, + {"200147", "10.0.1.3:11211"}, + {"200380", "10.0.1.4:11211"}, + {"200613", "10.0.1.1:11211"}, + {"200846", "10.0.1.6:11211"}, + {"201079", "10.0.1.7:11211"}, + {"201312", "10.0.1.7:11211"}, + {"201545", "10.0.1.1:11211"}, + {"201778", "10.0.1.1:11211"}, + {"202011", "10.0.1.7:11211"}, + {"202244", "10.0.1.7:11211"}, + {"202477", 
"10.0.1.6:11211"}, + {"202710", "10.0.1.1:11211"}, + {"202943", "10.0.1.1:11211"}, + {"203176", "10.0.1.1:11211"}, + {"203409", "10.0.1.3:11211"}, + {"203642", "10.0.1.5:11211"}, + {"203875", "10.0.1.1:11211"}, + {"204108", "10.0.1.8:11211"}, + {"204341", "10.0.1.1:11211"}, + {"204574", "10.0.1.4:11211"}, + {"204807", "10.0.1.3:11211"}, + {"205040", "10.0.1.7:11211"}, + {"205273", "10.0.1.2:11211"}, + {"205506", "10.0.1.6:11211"}, + {"205739", "10.0.1.2:11211"}, + {"205972", "10.0.1.6:11211"}, + {"206205", "10.0.1.6:11211"}, + {"206438", "10.0.1.6:11211"}, + {"206671", "10.0.1.7:11211"}, + {"206904", "10.0.1.6:11211"}, + {"207137", "10.0.1.7:11211"}, + {"207370", "10.0.1.5:11211"}, + {"207603", "10.0.1.1:11211"}, + {"207836", "10.0.1.5:11211"}, + {"208069", "10.0.1.6:11211"}, + {"208302", "10.0.1.8:11211"}, + {"208535", "10.0.1.5:11211"}, + {"208768", "10.0.1.3:11211"}, + {"209001", "10.0.1.2:11211"}, + {"209234", "10.0.1.3:11211"}, + {"209467", "10.0.1.4:11211"}, + {"209700", "10.0.1.6:11211"}, + {"209933", "10.0.1.5:11211"}, + {"210166", "10.0.1.1:11211"}, + {"210399", "10.0.1.8:11211"}, + {"210632", "10.0.1.4:11211"}, + {"210865", "10.0.1.8:11211"}, + {"211098", "10.0.1.8:11211"}, + {"211331", "10.0.1.3:11211"}, + {"211564", "10.0.1.6:11211"}, + {"211797", "10.0.1.8:11211"}, + {"212030", "10.0.1.8:11211"}, + {"212263", "10.0.1.7:11211"}, + {"212496", "10.0.1.8:11211"}, + {"212729", "10.0.1.1:11211"}, + {"212962", "10.0.1.7:11211"}, + {"213195", "10.0.1.6:11211"}, + {"213428", "10.0.1.2:11211"}, + {"213661", "10.0.1.8:11211"}, + {"213894", "10.0.1.5:11211"}, + {"214127", "10.0.1.3:11211"}, + {"214360", "10.0.1.1:11211"}, + {"214593", "10.0.1.2:11211"}, + {"214826", "10.0.1.7:11211"}, + {"215059", "10.0.1.7:11211"}, + {"215292", "10.0.1.2:11211"}, + {"215525", "10.0.1.1:11211"}, + {"215758", "10.0.1.8:11211"}, + {"215991", "10.0.1.3:11211"}, + {"216224", "10.0.1.8:11211"}, + {"216457", "10.0.1.3:11211"}, + {"216690", "10.0.1.7:11211"}, + {"216923", 
"10.0.1.2:11211"}, + {"217156", "10.0.1.2:11211"}, + {"217389", "10.0.1.2:11211"}, + {"217622", "10.0.1.4:11211"}, + {"217855", "10.0.1.3:11211"}, + {"218088", "10.0.1.7:11211"}, + {"218321", "10.0.1.8:11211"}, + {"218554", "10.0.1.7:11211"}, + {"218787", "10.0.1.2:11211"}, + {"219020", "10.0.1.8:11211"}, + {"219253", "10.0.1.8:11211"}, + {"219486", "10.0.1.5:11211"}, + {"219719", "10.0.1.2:11211"}, + {"219952", "10.0.1.3:11211"}, + {"220185", "10.0.1.7:11211"}, + {"220418", "10.0.1.7:11211"}, + {"220651", "10.0.1.7:11211"}, + {"220884", "10.0.1.5:11211"}, + {"221117", "10.0.1.5:11211"}, + {"221350", "10.0.1.8:11211"}, + {"221583", "10.0.1.7:11211"}, + {"221816", "10.0.1.7:11211"}, + {"222049", "10.0.1.8:11211"}, + {"222282", "10.0.1.2:11211"}, + {"222515", "10.0.1.8:11211"}, + {"222748", "10.0.1.8:11211"}, + {"222981", "10.0.1.6:11211"}, + {"223214", "10.0.1.2:11211"}, + {"223447", "10.0.1.4:11211"}, + {"223680", "10.0.1.2:11211"}, + {"223913", "10.0.1.8:11211"}, + {"224146", "10.0.1.8:11211"}, + {"224379", "10.0.1.3:11211"}, + {"224612", "10.0.1.5:11211"}, + {"224845", "10.0.1.8:11211"}, + {"225078", "10.0.1.8:11211"}, + {"225311", "10.0.1.3:11211"}, + {"225544", "10.0.1.5:11211"}, + {"225777", "10.0.1.1:11211"}, + {"226010", "10.0.1.6:11211"}, + {"226243", "10.0.1.7:11211"}, + {"226476", "10.0.1.2:11211"}, + {"226709", "10.0.1.3:11211"}, + {"226942", "10.0.1.5:11211"}, + {"227175", "10.0.1.3:11211"}, + {"227408", "10.0.1.1:11211"}, + {"227641", "10.0.1.8:11211"}, + {"227874", "10.0.1.4:11211"}, + {"228107", "10.0.1.6:11211"}, + {"228340", "10.0.1.4:11211"}, + {"228573", "10.0.1.1:11211"}, + {"228806", "10.0.1.3:11211"}, + {"238825", "10.0.1.2:11211"}, + {"239058", "10.0.1.5:11211"}, + {"239291", "10.0.1.5:11211"}, + {"239524", "10.0.1.6:11211"}, + {"239757", "10.0.1.5:11211"}, + {"239990", "10.0.1.7:11211"}, + {"240223", "10.0.1.7:11211"}, + {"240456", "10.0.1.5:11211"}, + {"240689", "10.0.1.5:11211"}, + {"240922", "10.0.1.1:11211"}, + {"241155", 
"10.0.1.1:11211"}, + {"241388", "10.0.1.7:11211"}, + {"241621", "10.0.1.1:11211"}, + {"241854", "10.0.1.8:11211"}, + {"242087", "10.0.1.6:11211"}, + {"242320", "10.0.1.8:11211"}, + {"242553", "10.0.1.2:11211"}, + {"242786", "10.0.1.5:11211"}, + {"243019", "10.0.1.6:11211"}, + {"243252", "10.0.1.4:11211"}, + {"243485", "10.0.1.8:11211"}, + {"243718", "10.0.1.7:11211"}, + {"243951", "10.0.1.5:11211"}, + {"249310", "10.0.1.5:11211"}, + {"249543", "10.0.1.8:11211"}, + {"249776", "10.0.1.7:11211"}, + {"250009", "10.0.1.3:11211"}, + {"250242", "10.0.1.8:11211"}, + {"250475", "10.0.1.4:11211"}, + {"250708", "10.0.1.8:11211"}, + {"250941", "10.0.1.1:11211"}, + {"251174", "10.0.1.5:11211"}, + {"251407", "10.0.1.8:11211"}, + {"251640", "10.0.1.1:11211"}, + {"251873", "10.0.1.2:11211"}, + {"252106", "10.0.1.5:11211"}, + {"252339", "10.0.1.2:11211"}, + {"252572", "10.0.1.5:11211"}, + {"252805", "10.0.1.8:11211"}, + {"253038", "10.0.1.5:11211"}, + {"253271", "10.0.1.7:11211"}, + {"253504", "10.0.1.4:11211"}, + {"253737", "10.0.1.3:11211"}, + {"253970", "10.0.1.7:11211"}, + {"254203", "10.0.1.6:11211"}, + {"254436", "10.0.1.2:11211"}, + {"254669", "10.0.1.5:11211"}, + {"254902", "10.0.1.8:11211"}, + {"255135", "10.0.1.5:11211"}, + {"255368", "10.0.1.3:11211"}, + {"255601", "10.0.1.4:11211"}, + {"255834", "10.0.1.1:11211"}, + {"256067", "10.0.1.8:11211"}, + {"260960", "10.0.1.5:11211"}, + {"261193", "10.0.1.5:11211"}, + {"261426", "10.0.1.2:11211"}, + {"261659", "10.0.1.2:11211"}, + {"261892", "10.0.1.2:11211"}, + {"262125", "10.0.1.8:11211"}, + {"262358", "10.0.1.6:11211"}, + {"262591", "10.0.1.8:11211"}, + {"262824", "10.0.1.1:11211"}, + {"263057", "10.0.1.5:11211"}, + {"263290", "10.0.1.1:11211"}, + {"263523", "10.0.1.3:11211"}, + {"263756", "10.0.1.2:11211"}, + {"263989", "10.0.1.5:11211"}, + {"264222", "10.0.1.5:11211"}, + {"264455", "10.0.1.5:11211"}, + {"264688", "10.0.1.5:11211"}, + {"264921", "10.0.1.6:11211"}, + {"265154", "10.0.1.7:11211"}, + {"265387", 
"10.0.1.7:11211"}, + {"265620", "10.0.1.4:11211"}, + {"265853", "10.0.1.5:11211"}, + {"266086", "10.0.1.6:11211"}, + {"266319", "10.0.1.1:11211"}, + {"266552", "10.0.1.2:11211"}, + {"266785", "10.0.1.7:11211"}, + {"267018", "10.0.1.1:11211"}, + {"267251", "10.0.1.6:11211"}, + {"267484", "10.0.1.8:11211"}, + {"267717", "10.0.1.3:11211"}, + {"267950", "10.0.1.2:11211"}, + {"268183", "10.0.1.5:11211"}, + {"268416", "10.0.1.6:11211"}, + {"268649", "10.0.1.5:11211"}, + {"268882", "10.0.1.6:11211"}, + {"269115", "10.0.1.5:11211"}, + {"269348", "10.0.1.2:11211"}, + {"269581", "10.0.1.4:11211"}, + {"269814", "10.0.1.6:11211"}, + {"270047", "10.0.1.2:11211"}, + {"270280", "10.0.1.1:11211"}, + {"270513", "10.0.1.8:11211"}, + {"270746", "10.0.1.6:11211"}, + {"270979", "10.0.1.4:11211"}, + {"271212", "10.0.1.6:11211"}, + {"271445", "10.0.1.8:11211"}, + {"271678", "10.0.1.7:11211"}, + {"271911", "10.0.1.3:11211"}, + {"272144", "10.0.1.8:11211"}, + {"272377", "10.0.1.7:11211"}, + {"272610", "10.0.1.1:11211"}, + {"272843", "10.0.1.8:11211"}, + {"273076", "10.0.1.8:11211"}, + {"273309", "10.0.1.1:11211"}, + {"273542", "10.0.1.8:11211"}, + {"273775", "10.0.1.5:11211"}, + {"274008", "10.0.1.7:11211"}, + {"274241", "10.0.1.7:11211"}, + {"274474", "10.0.1.6:11211"}, + {"274707", "10.0.1.6:11211"}, + {"274940", "10.0.1.8:11211"}, + {"275173", "10.0.1.4:11211"}, + {"275406", "10.0.1.6:11211"}, + {"275639", "10.0.1.2:11211"}, + {"275872", "10.0.1.3:11211"}, + {"276105", "10.0.1.5:11211"}, + {"276338", "10.0.1.2:11211"}, + {"276571", "10.0.1.4:11211"}, + {"276804", "10.0.1.4:11211"}, + {"277037", "10.0.1.6:11211"}, + {"277270", "10.0.1.6:11211"}, + {"277503", "10.0.1.1:11211"}, + {"277736", "10.0.1.6:11211"}, + {"277969", "10.0.1.7:11211"}, + {"278202", "10.0.1.3:11211"}, + {"278435", "10.0.1.6:11211"}, + {"278668", "10.0.1.3:11211"}, + {"278901", "10.0.1.1:11211"}, + {"279134", "10.0.1.7:11211"}, + {"279367", "10.0.1.4:11211"}, + {"279600", "10.0.1.6:11211"}, + {"279833", 
"10.0.1.8:11211"}, + {"280066", "10.0.1.7:11211"}, + {"280299", "10.0.1.8:11211"}, + {"280532", "10.0.1.5:11211"}, + {"280765", "10.0.1.8:11211"}, + {"280998", "10.0.1.1:11211"}, + {"281231", "10.0.1.5:11211"}, + {"281464", "10.0.1.8:11211"}, + {"281697", "10.0.1.2:11211"}, + {"281930", "10.0.1.7:11211"}, + {"282163", "10.0.1.5:11211"}, + {"282396", "10.0.1.4:11211"}, + {"282629", "10.0.1.7:11211"}, + {"282862", "10.0.1.7:11211"}, + {"283095", "10.0.1.5:11211"}, + {"283328", "10.0.1.2:11211"}, + {"283561", "10.0.1.1:11211"}, + {"283794", "10.0.1.8:11211"}, + {"284027", "10.0.1.2:11211"}, + {"284260", "10.0.1.6:11211"}, + {"284493", "10.0.1.2:11211"}, + {"284726", "10.0.1.2:11211"}, + {"284959", "10.0.1.6:11211"}, + {"285192", "10.0.1.3:11211"}, + {"285425", "10.0.1.6:11211"}, + {"285658", "10.0.1.2:11211"}, + {"285891", "10.0.1.8:11211"}, + {"286124", "10.0.1.3:11211"}, + {"286357", "10.0.1.4:11211"}, + {"286590", "10.0.1.5:11211"}, + {"286823", "10.0.1.7:11211"}, + {"287056", "10.0.1.6:11211"}, + {"287289", "10.0.1.3:11211"}, + {"287522", "10.0.1.3:11211"}, + {"287755", "10.0.1.3:11211"}, + {"287988", "10.0.1.6:11211"}, + {"288221", "10.0.1.8:11211"}, + {"288454", "10.0.1.4:11211"}, + {"288687", "10.0.1.3:11211"}, + {"288920", "10.0.1.5:11211"}, + {"289153", "10.0.1.8:11211"}, + {"289386", "10.0.1.7:11211"}, + {"289619", "10.0.1.8:11211"}, + {"289852", "10.0.1.8:11211"}, + {"290085", "10.0.1.3:11211"}, + {"290318", "10.0.1.7:11211"}, + {"290551", "10.0.1.8:11211"}, + {"290784", "10.0.1.7:11211"}, + {"291017", "10.0.1.5:11211"}, + {"291250", "10.0.1.8:11211"}, + {"291483", "10.0.1.3:11211"}, + {"291716", "10.0.1.3:11211"}, + {"291949", "10.0.1.1:11211"}, + {"292182", "10.0.1.8:11211"}, + {"292415", "10.0.1.5:11211"}, + {"292648", "10.0.1.8:11211"}, + {"292881", "10.0.1.5:11211"}, + {"293114", "10.0.1.7:11211"}, + {"293347", "10.0.1.5:11211"}, + {"293580", "10.0.1.5:11211"}, + {"293813", "10.0.1.5:11211"}, + {"294046", "10.0.1.3:11211"}, + {"294279", 
"10.0.1.6:11211"}, + {"294512", "10.0.1.8:11211"}, + {"294745", "10.0.1.7:11211"}, + {"294978", "10.0.1.7:11211"}, + {"295211", "10.0.1.2:11211"}, + {"295444", "10.0.1.5:11211"}, + {"295677", "10.0.1.7:11211"}, + {"295910", "10.0.1.3:11211"}, + {"296143", "10.0.1.7:11211"}, + {"296376", "10.0.1.4:11211"}, + {"296609", "10.0.1.3:11211"}, + {"296842", "10.0.1.6:11211"}, + {"297075", "10.0.1.2:11211"}, + {"297308", "10.0.1.4:11211"}, + {"297541", "10.0.1.8:11211"}, + {"297774", "10.0.1.3:11211"}, + {"298007", "10.0.1.3:11211"}, + {"298240", "10.0.1.5:11211"}, + {"298473", "10.0.1.4:11211"}, + {"298706", "10.0.1.5:11211"}, + {"298939", "10.0.1.7:11211"}, + {"303599", "10.0.1.6:11211"}, + {"303832", "10.0.1.4:11211"}, + {"304065", "10.0.1.3:11211"}, + {"304298", "10.0.1.1:11211"}, + {"304531", "10.0.1.1:11211"}, + {"304764", "10.0.1.8:11211"}, + {"304997", "10.0.1.7:11211"}, + {"305230", "10.0.1.6:11211"}, + {"305463", "10.0.1.2:11211"}, + {"305696", "10.0.1.7:11211"}, + {"305929", "10.0.1.4:11211"}, + {"306162", "10.0.1.4:11211"}, + {"306395", "10.0.1.4:11211"}, + {"306628", "10.0.1.8:11211"}, + {"306861", "10.0.1.7:11211"}, + {"307094", "10.0.1.4:11211"}, + {"307327", "10.0.1.8:11211"}, + {"307560", "10.0.1.2:11211"}, + {"307793", "10.0.1.8:11211"}, + {"308026", "10.0.1.5:11211"}, + {"308259", "10.0.1.6:11211"}, + {"308492", "10.0.1.2:11211"}, + {"308725", "10.0.1.6:11211"}, + {"308958", "10.0.1.3:11211"}, + {"309191", "10.0.1.7:11211"}, + {"309424", "10.0.1.7:11211"}, + {"309657", "10.0.1.8:11211"}, + {"309890", "10.0.1.8:11211"}, + {"310123", "10.0.1.1:11211"}, + {"310356", "10.0.1.3:11211"}, + {"310589", "10.0.1.5:11211"}, + {"310822", "10.0.1.8:11211"}, + {"311055", "10.0.1.3:11211"}, + {"311288", "10.0.1.8:11211"}, + {"311521", "10.0.1.8:11211"}, + {"311754", "10.0.1.3:11211"}, + {"311987", "10.0.1.7:11211"}, + {"312220", "10.0.1.5:11211"}, + {"312453", "10.0.1.2:11211"}, + {"312686", "10.0.1.2:11211"}, + {"312919", "10.0.1.1:11211"}, + {"313152", 
"10.0.1.8:11211"}, + {"313385", "10.0.1.2:11211"}, + {"313618", "10.0.1.8:11211"}, + {"313851", "10.0.1.4:11211"}, + {"314084", "10.0.1.4:11211"}, + {"314317", "10.0.1.6:11211"}, + {"314550", "10.0.1.5:11211"}, + {"314783", "10.0.1.1:11211"}, + {"315016", "10.0.1.1:11211"}, + {"315249", "10.0.1.6:11211"}, + {"315482", "10.0.1.4:11211"}, + {"315715", "10.0.1.2:11211"}, + {"315948", "10.0.1.7:11211"}, + {"316181", "10.0.1.2:11211"}, + {"316414", "10.0.1.8:11211"}, + {"316647", "10.0.1.7:11211"}, + {"316880", "10.0.1.7:11211"}, + {"317113", "10.0.1.8:11211"}, + {"317346", "10.0.1.7:11211"}, + {"317579", "10.0.1.4:11211"}, + {"317812", "10.0.1.2:11211"}, + {"318045", "10.0.1.1:11211"}, + {"318278", "10.0.1.6:11211"}, + {"318511", "10.0.1.6:11211"}, + {"318744", "10.0.1.1:11211"}, + {"318977", "10.0.1.5:11211"}, + {"319210", "10.0.1.2:11211"}, + {"319443", "10.0.1.4:11211"}, + {"319676", "10.0.1.7:11211"}, + {"319909", "10.0.1.3:11211"}, + {"320142", "10.0.1.8:11211"}, + {"320375", "10.0.1.1:11211"}, + {"320608", "10.0.1.8:11211"}, + {"320841", "10.0.1.1:11211"}, + {"321074", "10.0.1.5:11211"}, + {"321307", "10.0.1.2:11211"}, + {"321540", "10.0.1.3:11211"}, + {"321773", "10.0.1.6:11211"}, + {"322006", "10.0.1.3:11211"}, + {"322239", "10.0.1.7:11211"}, + {"322472", "10.0.1.6:11211"}, + {"322705", "10.0.1.8:11211"}, + {"322938", "10.0.1.4:11211"}, + {"323171", "10.0.1.7:11211"}, + {"323404", "10.0.1.2:11211"}, + {"323637", "10.0.1.4:11211"}, + {"323870", "10.0.1.2:11211"}, + {"324103", "10.0.1.7:11211"}, + {"324336", "10.0.1.3:11211"}, + {"324569", "10.0.1.2:11211"}, + {"324802", "10.0.1.3:11211"}, + {"325035", "10.0.1.8:11211"}, + {"325268", "10.0.1.5:11211"}, + {"325501", "10.0.1.6:11211"}, + {"325734", "10.0.1.3:11211"}, + {"325967", "10.0.1.5:11211"}, + {"326200", "10.0.1.1:11211"}, + {"326433", "10.0.1.8:11211"}, + {"326666", "10.0.1.4:11211"}, + {"326899", "10.0.1.2:11211"}, + {"327132", "10.0.1.4:11211"}, + {"327365", "10.0.1.1:11211"}, + {"327598", 
"10.0.1.3:11211"}, + {"327831", "10.0.1.6:11211"}, + {"328064", "10.0.1.4:11211"}, + {"328297", "10.0.1.6:11211"}, + {"328530", "10.0.1.8:11211"}, + {"328763", "10.0.1.2:11211"}, + {"328996", "10.0.1.1:11211"}, + {"329229", "10.0.1.3:11211"}, + {"329462", "10.0.1.7:11211"}, + {"329695", "10.0.1.6:11211"}, + {"329928", "10.0.1.1:11211"}, + {"330161", "10.0.1.7:11211"}, + {"330394", "10.0.1.1:11211"}, + {"330627", "10.0.1.3:11211"}, + {"330860", "10.0.1.8:11211"}, + {"331093", "10.0.1.2:11211"}, + {"331326", "10.0.1.6:11211"}, + {"331559", "10.0.1.4:11211"}, + {"331792", "10.0.1.7:11211"}, + {"332025", "10.0.1.8:11211"}, + {"332258", "10.0.1.2:11211"}, + {"332491", "10.0.1.5:11211"}, + {"332724", "10.0.1.6:11211"}, + {"332957", "10.0.1.4:11211"}, + {"333190", "10.0.1.8:11211"}, + {"333423", "10.0.1.7:11211"}, + {"333656", "10.0.1.7:11211"}, + {"333889", "10.0.1.1:11211"}, + {"334122", "10.0.1.5:11211"}, + {"334355", "10.0.1.4:11211"}, + {"334588", "10.0.1.8:11211"}, + {"334821", "10.0.1.3:11211"}, + {"335054", "10.0.1.3:11211"}, + {"335287", "10.0.1.7:11211"}, + {"335520", "10.0.1.8:11211"}, + {"335753", "10.0.1.2:11211"}, + {"335986", "10.0.1.3:11211"}, + {"336219", "10.0.1.2:11211"}, + {"336452", "10.0.1.8:11211"}, + {"336685", "10.0.1.2:11211"}, + {"336918", "10.0.1.7:11211"}, + {"337151", "10.0.1.4:11211"}, + {"337384", "10.0.1.6:11211"}, + {"337617", "10.0.1.1:11211"}, + {"337850", "10.0.1.2:11211"}, + {"338083", "10.0.1.7:11211"}, + {"338316", "10.0.1.8:11211"}, + {"338549", "10.0.1.5:11211"}, + {"338782", "10.0.1.8:11211"}, + {"339015", "10.0.1.8:11211"}, + {"339248", "10.0.1.5:11211"}, + {"339481", "10.0.1.8:11211"}, + {"339714", "10.0.1.3:11211"}, + {"339947", "10.0.1.7:11211"}, + {"340180", "10.0.1.5:11211"}, + {"340413", "10.0.1.5:11211"}, + {"340646", "10.0.1.8:11211"}, + {"340879", "10.0.1.3:11211"}, + {"341112", "10.0.1.6:11211"}, + {"341345", "10.0.1.8:11211"}, + {"341578", "10.0.1.7:11211"}, + {"341811", "10.0.1.2:11211"}, + {"342044", 
"10.0.1.1:11211"}, + {"342277", "10.0.1.4:11211"}, + {"342510", "10.0.1.5:11211"}, + {"342743", "10.0.1.5:11211"}, + {"342976", "10.0.1.4:11211"}, + {"343209", "10.0.1.7:11211"}, + {"343442", "10.0.1.8:11211"}, + {"343675", "10.0.1.1:11211"}, + {"343908", "10.0.1.2:11211"}, + {"348801", "10.0.1.2:11211"}, + {"349034", "10.0.1.7:11211"}, + {"349267", "10.0.1.5:11211"}, + {"349500", "10.0.1.7:11211"}, + {"349733", "10.0.1.8:11211"}, + {"349966", "10.0.1.3:11211"}, + {"350199", "10.0.1.8:11211"}, + {"350432", "10.0.1.4:11211"}, + {"350665", "10.0.1.5:11211"}, + {"350898", "10.0.1.2:11211"}, + {"351131", "10.0.1.5:11211"}, + {"351364", "10.0.1.8:11211"}, + {"351597", "10.0.1.3:11211"}, + {"351830", "10.0.1.5:11211"}, + {"352063", "10.0.1.3:11211"}, + {"352296", "10.0.1.1:11211"}, + {"352529", "10.0.1.4:11211"}, + {"352762", "10.0.1.5:11211"}, + {"352995", "10.0.1.1:11211"}, + {"353228", "10.0.1.8:11211"}, + {"357888", "10.0.1.5:11211"}, + {"358121", "10.0.1.8:11211"}, + {"358354", "10.0.1.1:11211"}, + {"358587", "10.0.1.1:11211"}, + {"358820", "10.0.1.7:11211"}, + {"359053", "10.0.1.4:11211"}, + {"359286", "10.0.1.2:11211"}, + {"359519", "10.0.1.3:11211"}, + {"359752", "10.0.1.1:11211"}, + {"359985", "10.0.1.1:11211"}, + {"360218", "10.0.1.4:11211"}, + {"360451", "10.0.1.5:11211"}, + {"360684", "10.0.1.2:11211"}, + {"360917", "10.0.1.4:11211"}, + {"361150", "10.0.1.1:11211"}, + {"361383", "10.0.1.2:11211"}, + {"361616", "10.0.1.3:11211"}, + {"361849", "10.0.1.3:11211"}, + {"362082", "10.0.1.2:11211"}, + {"362315", "10.0.1.4:11211"}, + {"362548", "10.0.1.5:11211"}, + {"362781", "10.0.1.6:11211"}, + {"363014", "10.0.1.7:11211"}, + {"363247", "10.0.1.8:11211"}, + {"363480", "10.0.1.7:11211"}, + {"363713", "10.0.1.4:11211"}, + {"363946", "10.0.1.4:11211"}, + {"364179", "10.0.1.2:11211"}, + {"364412", "10.0.1.5:11211"}, + {"364645", "10.0.1.8:11211"}, + {"364878", "10.0.1.1:11211"}, + {"365111", "10.0.1.6:11211"}, + {"365344", "10.0.1.5:11211"}, + {"365577", 
"10.0.1.6:11211"}, + {"365810", "10.0.1.8:11211"}, + {"366043", "10.0.1.5:11211"}, + {"366276", "10.0.1.4:11211"}, + {"366509", "10.0.1.6:11211"}, + {"366742", "10.0.1.7:11211"}, + {"374431", "10.0.1.4:11211"}, + {"374664", "10.0.1.8:11211"}, + {"374897", "10.0.1.6:11211"}, + {"375130", "10.0.1.3:11211"}, + {"375363", "10.0.1.4:11211"}, + {"375596", "10.0.1.8:11211"}, + {"375829", "10.0.1.1:11211"}, + {"376062", "10.0.1.3:11211"}, + {"376295", "10.0.1.1:11211"}, + {"376528", "10.0.1.1:11211"}, + {"376761", "10.0.1.4:11211"}, + {"376994", "10.0.1.7:11211"}, + {"377227", "10.0.1.2:11211"}, + {"377460", "10.0.1.8:11211"}, + {"377693", "10.0.1.1:11211"}, + {"377926", "10.0.1.5:11211"}, + {"378159", "10.0.1.1:11211"}, + {"378392", "10.0.1.8:11211"}, + {"378625", "10.0.1.7:11211"}, + {"378858", "10.0.1.4:11211"}, + {"379091", "10.0.1.3:11211"}, + {"379324", "10.0.1.8:11211"}, + {"379557", "10.0.1.2:11211"}, + {"379790", "10.0.1.2:11211"}, + {"380023", "10.0.1.8:11211"}, + {"380256", "10.0.1.6:11211"}, + {"380489", "10.0.1.2:11211"}, + {"380722", "10.0.1.8:11211"}, + {"380955", "10.0.1.5:11211"}, + {"381188", "10.0.1.8:11211"}, + {"381421", "10.0.1.8:11211"}, + {"381654", "10.0.1.4:11211"}, + {"381887", "10.0.1.7:11211"}, + {"382120", "10.0.1.7:11211"}, + {"382353", "10.0.1.3:11211"}, + {"382586", "10.0.1.6:11211"}, + {"382819", "10.0.1.2:11211"}, + {"383052", "10.0.1.8:11211"}, + {"383285", "10.0.1.7:11211"}, + {"383518", "10.0.1.8:11211"}, + {"383751", "10.0.1.2:11211"}, + {"383984", "10.0.1.6:11211"}, + {"384217", "10.0.1.6:11211"}, + {"384450", "10.0.1.3:11211"}, + {"384683", "10.0.1.8:11211"}, + {"384916", "10.0.1.3:11211"}, + {"385149", "10.0.1.1:11211"}, + {"385382", "10.0.1.2:11211"}, + {"385615", "10.0.1.1:11211"}, + {"385848", "10.0.1.5:11211"}, + {"386081", "10.0.1.5:11211"}, + {"386314", "10.0.1.6:11211"}, + {"386547", "10.0.1.3:11211"}, + {"386780", "10.0.1.4:11211"}, + {"387013", "10.0.1.7:11211"}, + {"387246", "10.0.1.8:11211"}, + {"387479", 
"10.0.1.3:11211"}, + {"387712", "10.0.1.8:11211"}, + {"387945", "10.0.1.8:11211"}, + {"388178", "10.0.1.6:11211"}, + {"388411", "10.0.1.1:11211"}, + {"388644", "10.0.1.8:11211"}, + {"388877", "10.0.1.7:11211"}, + {"389110", "10.0.1.1:11211"}, + {"389343", "10.0.1.1:11211"}, + {"389576", "10.0.1.8:11211"}, + {"389809", "10.0.1.2:11211"}, + {"390042", "10.0.1.5:11211"}, + {"390275", "10.0.1.7:11211"}, + {"390508", "10.0.1.2:11211"}, + {"390741", "10.0.1.2:11211"}, + {"390974", "10.0.1.4:11211"}, + {"391207", "10.0.1.3:11211"}, + {"391440", "10.0.1.4:11211"}, + {"391673", "10.0.1.4:11211"}, + {"391906", "10.0.1.7:11211"}, + {"392139", "10.0.1.3:11211"}, + {"392372", "10.0.1.6:11211"}, + {"392605", "10.0.1.6:11211"}, + {"392838", "10.0.1.6:11211"}, + {"393071", "10.0.1.2:11211"}, + {"393304", "10.0.1.2:11211"}, + {"393537", "10.0.1.2:11211"}, + {"393770", "10.0.1.4:11211"}, + {"394003", "10.0.1.1:11211"}, + {"394236", "10.0.1.6:11211"}, + {"394469", "10.0.1.4:11211"}, + {"394702", "10.0.1.6:11211"}, + {"394935", "10.0.1.7:11211"}, + {"395168", "10.0.1.4:11211"}, + {"395401", "10.0.1.3:11211"}, + {"395634", "10.0.1.4:11211"}, + {"395867", "10.0.1.1:11211"}, + {"396100", "10.0.1.6:11211"}, + {"396333", "10.0.1.4:11211"}, + {"396566", "10.0.1.8:11211"}, + {"396799", "10.0.1.3:11211"}, + {"397032", "10.0.1.4:11211"}, + {"397265", "10.0.1.3:11211"}, + {"397498", "10.0.1.5:11211"}, + {"397731", "10.0.1.1:11211"}, + {"397964", "10.0.1.2:11211"}, + {"398197", "10.0.1.8:11211"}, + {"398430", "10.0.1.7:11211"}, + {"398663", "10.0.1.1:11211"}, + {"398896", "10.0.1.5:11211"}, + {"399129", "10.0.1.5:11211"}, + {"399362", "10.0.1.8:11211"}, + {"399595", "10.0.1.8:11211"}, + {"399828", "10.0.1.7:11211"}, + {"400061", "10.0.1.6:11211"}, + {"400294", "10.0.1.8:11211"}, + {"400527", "10.0.1.2:11211"}, + {"400760", "10.0.1.7:11211"}, + {"400993", "10.0.1.7:11211"}, + {"401226", "10.0.1.1:11211"}, + {"401459", "10.0.1.1:11211"}, + {"401692", "10.0.1.2:11211"}, + {"401925", 
"10.0.1.1:11211"}, + {"402158", "10.0.1.1:11211"}, + {"402391", "10.0.1.5:11211"}, + {"402624", "10.0.1.1:11211"}, + {"402857", "10.0.1.8:11211"}, + {"403090", "10.0.1.1:11211"}, + {"403323", "10.0.1.5:11211"}, + {"403556", "10.0.1.6:11211"}, + {"403789", "10.0.1.6:11211"}, + {"404022", "10.0.1.2:11211"}, + {"404255", "10.0.1.4:11211"}, + {"404488", "10.0.1.1:11211"}, + {"404721", "10.0.1.8:11211"}, + {"404954", "10.0.1.1:11211"}, + {"405187", "10.0.1.7:11211"}, + {"405420", "10.0.1.7:11211"}, + {"405653", "10.0.1.7:11211"}, + {"405886", "10.0.1.1:11211"}, + {"406119", "10.0.1.3:11211"}, + {"406352", "10.0.1.5:11211"}, + {"411944", "10.0.1.1:11211"}, + {"412177", "10.0.1.8:11211"}, + {"412410", "10.0.1.3:11211"}, + {"412643", "10.0.1.7:11211"}, + {"412876", "10.0.1.4:11211"}, + {"413109", "10.0.1.8:11211"}, + {"413342", "10.0.1.7:11211"}, + {"413575", "10.0.1.8:11211"}, + {"413808", "10.0.1.7:11211"}, + {"414041", "10.0.1.1:11211"}, + {"414274", "10.0.1.5:11211"}, + {"414507", "10.0.1.1:11211"}, + {"414740", "10.0.1.8:11211"}, + {"414973", "10.0.1.2:11211"}, + {"415206", "10.0.1.8:11211"}, + {"415439", "10.0.1.6:11211"}, + {"415672", "10.0.1.1:11211"}, + {"415905", "10.0.1.7:11211"}, + {"416138", "10.0.1.6:11211"}, + {"416371", "10.0.1.8:11211"}, + {"416604", "10.0.1.4:11211"}, + {"416837", "10.0.1.4:11211"}, + {"417070", "10.0.1.4:11211"}, + {"417303", "10.0.1.6:11211"}, + {"417536", "10.0.1.6:11211"}, + {"417769", "10.0.1.8:11211"}, + {"418002", "10.0.1.3:11211"}, + {"418235", "10.0.1.8:11211"}, + {"418468", "10.0.1.4:11211"}, + {"418701", "10.0.1.4:11211"}, + {"418934", "10.0.1.7:11211"}, + {"419167", "10.0.1.6:11211"}, + {"419400", "10.0.1.3:11211"}, + {"419633", "10.0.1.5:11211"}, + {"419866", "10.0.1.8:11211"}, + {"420099", "10.0.1.6:11211"}, + {"420332", "10.0.1.3:11211"}, + {"420565", "10.0.1.5:11211"}, + {"420798", "10.0.1.8:11211"}, + {"421031", "10.0.1.8:11211"}, + {"421264", "10.0.1.6:11211"}, + {"426856", "10.0.1.3:11211"}, + {"427089", 
"10.0.1.5:11211"}, + {"427322", "10.0.1.1:11211"}, + {"427555", "10.0.1.6:11211"}, + {"427788", "10.0.1.5:11211"}, + {"428021", "10.0.1.8:11211"}, + {"428254", "10.0.1.3:11211"}, + {"428487", "10.0.1.7:11211"}, + {"428720", "10.0.1.4:11211"}, + {"428953", "10.0.1.4:11211"}, + {"429186", "10.0.1.8:11211"}, + {"429419", "10.0.1.6:11211"}, + {"429652", "10.0.1.8:11211"}, + {"429885", "10.0.1.6:11211"}, + {"430118", "10.0.1.5:11211"}, + {"430351", "10.0.1.4:11211"}, + {"430584", "10.0.1.6:11211"}, + {"430817", "10.0.1.5:11211"}, + {"431050", "10.0.1.3:11211"}, + {"431283", "10.0.1.8:11211"}, + {"431516", "10.0.1.1:11211"}, + {"431749", "10.0.1.4:11211"}, + {"431982", "10.0.1.5:11211"}, + {"432215", "10.0.1.6:11211"}, + {"432448", "10.0.1.6:11211"}, + {"432681", "10.0.1.4:11211"}, + {"432914", "10.0.1.2:11211"}, + {"433147", "10.0.1.7:11211"}, + {"433380", "10.0.1.6:11211"}, + {"433613", "10.0.1.1:11211"}, + {"433846", "10.0.1.3:11211"}, + {"434079", "10.0.1.1:11211"}, + {"434312", "10.0.1.2:11211"}, + {"434545", "10.0.1.4:11211"}, + {"434778", "10.0.1.3:11211"}, + {"435011", "10.0.1.5:11211"}, + {"435244", "10.0.1.2:11211"}, + {"435477", "10.0.1.4:11211"}, + {"435710", "10.0.1.5:11211"}, + {"435943", "10.0.1.6:11211"}, + {"436176", "10.0.1.8:11211"}, + {"436409", "10.0.1.5:11211"}, + {"436642", "10.0.1.1:11211"}, + {"436875", "10.0.1.8:11211"}, + {"437108", "10.0.1.6:11211"}, + {"437341", "10.0.1.2:11211"}, + {"437574", "10.0.1.1:11211"}, + {"437807", "10.0.1.8:11211"}, + {"438040", "10.0.1.7:11211"}, + {"438273", "10.0.1.3:11211"}, + {"438506", "10.0.1.4:11211"}, + {"438739", "10.0.1.8:11211"}, + {"438972", "10.0.1.7:11211"}, + {"439205", "10.0.1.8:11211"}, + {"439438", "10.0.1.1:11211"}, + {"439671", "10.0.1.3:11211"}, + {"439904", "10.0.1.3:11211"}, + {"440137", "10.0.1.4:11211"}, + {"440370", "10.0.1.3:11211"}, + {"457612", "10.0.1.8:11211"}, + {"457845", "10.0.1.3:11211"}, + {"458078", "10.0.1.1:11211"}, + {"458311", "10.0.1.3:11211"}, + {"458544", 
"10.0.1.7:11211"}, + {"458777", "10.0.1.7:11211"}, + {"459010", "10.0.1.5:11211"}, + {"459243", "10.0.1.6:11211"}, + {"459476", "10.0.1.6:11211"}, + {"459709", "10.0.1.6:11211"}, + {"459942", "10.0.1.1:11211"}, + {"460175", "10.0.1.1:11211"}, + {"460408", "10.0.1.7:11211"}, + {"460641", "10.0.1.5:11211"}, + {"460874", "10.0.1.5:11211"}, + {"461107", "10.0.1.3:11211"}, + {"461340", "10.0.1.4:11211"}, + {"461573", "10.0.1.7:11211"}, + {"461806", "10.0.1.7:11211"}, + {"462039", "10.0.1.4:11211"}, + {"462272", "10.0.1.1:11211"}, + {"462505", "10.0.1.8:11211"}, + {"462738", "10.0.1.5:11211"}, + {"462971", "10.0.1.3:11211"}, + {"463204", "10.0.1.8:11211"}, + {"463437", "10.0.1.8:11211"}, + {"463670", "10.0.1.5:11211"}, + {"463903", "10.0.1.1:11211"}, + {"464136", "10.0.1.5:11211"}, + {"464369", "10.0.1.6:11211"}, + {"464602", "10.0.1.7:11211"}, + {"464835", "10.0.1.2:11211"}, + {"465068", "10.0.1.2:11211"}, + {"465301", "10.0.1.1:11211"}, + {"465534", "10.0.1.8:11211"}, + {"465767", "10.0.1.6:11211"}, + {"466000", "10.0.1.1:11211"}, + {"466233", "10.0.1.6:11211"}, + {"466466", "10.0.1.3:11211"}, + {"466699", "10.0.1.6:11211"}, + {"466932", "10.0.1.4:11211"}, + {"467165", "10.0.1.4:11211"}, + {"467398", "10.0.1.2:11211"}, + {"467631", "10.0.1.7:11211"}, + {"467864", "10.0.1.3:11211"}, + {"468097", "10.0.1.6:11211"}, + {"468330", "10.0.1.1:11211"}, + {"468563", "10.0.1.1:11211"}, + {"468796", "10.0.1.7:11211"}, + {"469029", "10.0.1.6:11211"}, + {"469262", "10.0.1.3:11211"}, + {"469495", "10.0.1.6:11211"}, + {"469728", "10.0.1.6:11211"}, + {"469961", "10.0.1.4:11211"}, + {"470194", "10.0.1.5:11211"}, + {"470427", "10.0.1.6:11211"}, + {"470660", "10.0.1.1:11211"}, + {"470893", "10.0.1.4:11211"}, + {"471126", "10.0.1.7:11211"}, + {"471359", "10.0.1.7:11211"}, + {"471592", "10.0.1.1:11211"}, + {"471825", "10.0.1.5:11211"}, + {"472058", "10.0.1.6:11211"}, + {"472291", "10.0.1.7:11211"}, + {"472524", "10.0.1.8:11211"}, + {"472757", "10.0.1.2:11211"}, + {"472990", 
"10.0.1.2:11211"}, + {"473223", "10.0.1.7:11211"}, + {"473456", "10.0.1.7:11211"}, + {"473689", "10.0.1.5:11211"}, + {"473922", "10.0.1.7:11211"}, + {"474155", "10.0.1.6:11211"}, + {"474388", "10.0.1.8:11211"}, + {"474621", "10.0.1.3:11211"}, + {"474854", "10.0.1.1:11211"}, + {"475087", "10.0.1.1:11211"}, + {"475320", "10.0.1.4:11211"}, + {"475553", "10.0.1.6:11211"}, + {"475786", "10.0.1.4:11211"}, + {"476019", "10.0.1.5:11211"}, + {"476252", "10.0.1.1:11211"}, + {"476485", "10.0.1.4:11211"}, + {"476718", "10.0.1.6:11211"}, + {"476951", "10.0.1.8:11211"}, + {"477184", "10.0.1.8:11211"}, + {"477417", "10.0.1.3:11211"}, + {"477650", "10.0.1.1:11211"}, + {"477883", "10.0.1.1:11211"}, + {"478116", "10.0.1.2:11211"}, + {"478349", "10.0.1.5:11211"}, + {"478582", "10.0.1.4:11211"}, + {"478815", "10.0.1.4:11211"}, + {"479048", "10.0.1.1:11211"}, + {"479281", "10.0.1.1:11211"}, + {"479514", "10.0.1.2:11211"}, + {"479747", "10.0.1.5:11211"}, + {"479980", "10.0.1.7:11211"}, + {"480213", "10.0.1.3:11211"}, + {"480446", "10.0.1.1:11211"}, + {"480679", "10.0.1.4:11211"}, + {"480912", "10.0.1.6:11211"}, + {"481145", "10.0.1.2:11211"}, + {"481378", "10.0.1.1:11211"}, + {"481611", "10.0.1.7:11211"}, + {"481844", "10.0.1.3:11211"}, + {"482077", "10.0.1.4:11211"}, + {"482310", "10.0.1.4:11211"}, + {"482543", "10.0.1.3:11211"}, + {"482776", "10.0.1.8:11211"}, + {"483009", "10.0.1.7:11211"}, + {"483242", "10.0.1.6:11211"}, + {"483475", "10.0.1.3:11211"}, + {"483708", "10.0.1.5:11211"}, + {"483941", "10.0.1.8:11211"}, + {"484174", "10.0.1.7:11211"}, + {"484407", "10.0.1.2:11211"}, + {"484640", "10.0.1.8:11211"}, + {"484873", "10.0.1.8:11211"}, + {"485106", "10.0.1.4:11211"}, + {"485339", "10.0.1.2:11211"}, + {"485572", "10.0.1.6:11211"}, + {"485805", "10.0.1.5:11211"}, + {"486038", "10.0.1.3:11211"}, + {"486271", "10.0.1.5:11211"}, + {"486504", "10.0.1.1:11211"}, + {"486737", "10.0.1.5:11211"}, + {"486970", "10.0.1.3:11211"}, + {"487203", "10.0.1.2:11211"}, + {"487436", 
"10.0.1.8:11211"}, + {"487669", "10.0.1.3:11211"}, + {"487902", "10.0.1.7:11211"}, + {"488135", "10.0.1.2:11211"}, + {"488368", "10.0.1.7:11211"}, + {"488601", "10.0.1.8:11211"}, + {"488834", "10.0.1.8:11211"}, + {"489067", "10.0.1.5:11211"}, + {"489300", "10.0.1.4:11211"}, + {"489533", "10.0.1.5:11211"}, + {"489766", "10.0.1.5:11211"}, + {"489999", "10.0.1.3:11211"}, + {"490232", "10.0.1.4:11211"}, + {"490465", "10.0.1.2:11211"}, + {"490698", "10.0.1.1:11211"}, + {"490931", "10.0.1.2:11211"}, + {"491164", "10.0.1.5:11211"}, + {"491397", "10.0.1.5:11211"}, + {"491630", "10.0.1.2:11211"}, + {"491863", "10.0.1.1:11211"}, + {"492096", "10.0.1.1:11211"}, + {"492329", "10.0.1.5:11211"}, + {"492562", "10.0.1.7:11211"}, + {"492795", "10.0.1.3:11211"}, + {"493028", "10.0.1.1:11211"}, + {"493261", "10.0.1.2:11211"}, + {"493494", "10.0.1.3:11211"}, + {"493727", "10.0.1.6:11211"}, + {"493960", "10.0.1.5:11211"}, + {"494193", "10.0.1.6:11211"}, + {"494426", "10.0.1.6:11211"}, + {"494659", "10.0.1.4:11211"}, + {"494892", "10.0.1.4:11211"}, + {"495125", "10.0.1.1:11211"}, + {"495358", "10.0.1.3:11211"}, + {"495591", "10.0.1.6:11211"}, + {"495824", "10.0.1.5:11211"}, + {"496057", "10.0.1.7:11211"}, + {"496290", "10.0.1.5:11211"}, + {"496523", "10.0.1.5:11211"}, + {"496756", "10.0.1.1:11211"}, + {"496989", "10.0.1.3:11211"}, + {"497222", "10.0.1.8:11211"}, + {"497455", "10.0.1.6:11211"}, + {"497688", "10.0.1.7:11211"}, + {"497921", "10.0.1.5:11211"}, + {"498154", "10.0.1.4:11211"}, + {"498387", "10.0.1.3:11211"}, + {"498620", "10.0.1.3:11211"}, + {"498853", "10.0.1.3:11211"}, + {"499086", "10.0.1.1:11211"}, + {"499319", "10.0.1.8:11211"}, + {"499552", "10.0.1.3:11211"}, + {"499785", "10.0.1.2:11211"}, + {"500018", "10.0.1.5:11211"}, + {"500251", "10.0.1.8:11211"}, + {"500484", "10.0.1.7:11211"}, + {"500717", "10.0.1.4:11211"}, + {"500950", "10.0.1.5:11211"}, + {"501183", "10.0.1.4:11211"}, + {"501416", "10.0.1.4:11211"}, + {"501649", "10.0.1.2:11211"}, + {"501882", 
"10.0.1.1:11211"}, + {"502115", "10.0.1.3:11211"}, + {"502348", "10.0.1.3:11211"}, + {"502581", "10.0.1.8:11211"}, + {"502814", "10.0.1.1:11211"}, + {"503047", "10.0.1.8:11211"}, + {"503280", "10.0.1.7:11211"}, + {"503513", "10.0.1.1:11211"}, + {"503746", "10.0.1.1:11211"}, + {"503979", "10.0.1.8:11211"}, + {"504212", "10.0.1.3:11211"}, + {"504445", "10.0.1.2:11211"}, + {"504678", "10.0.1.2:11211"}, + {"504911", "10.0.1.3:11211"}, + {"505144", "10.0.1.5:11211"}, + {"505377", "10.0.1.6:11211"}, + {"505610", "10.0.1.1:11211"}, + {"505843", "10.0.1.6:11211"}, + {"506076", "10.0.1.1:11211"}, + {"506309", "10.0.1.2:11211"}, + {"506542", "10.0.1.3:11211"}, + {"506775", "10.0.1.8:11211"}, + {"507008", "10.0.1.2:11211"}, + {"507241", "10.0.1.3:11211"}, + {"507474", "10.0.1.6:11211"}, + {"507707", "10.0.1.7:11211"}, + {"507940", "10.0.1.4:11211"}, + {"508173", "10.0.1.5:11211"}, + {"508406", "10.0.1.1:11211"}, + {"508639", "10.0.1.3:11211"}, + {"508872", "10.0.1.4:11211"}, + {"509105", "10.0.1.7:11211"}, + {"509338", "10.0.1.2:11211"}, + {"509571", "10.0.1.3:11211"}, + {"509804", "10.0.1.3:11211"}, + {"510037", "10.0.1.7:11211"}, + {"510270", "10.0.1.4:11211"}, + {"510503", "10.0.1.1:11211"}, + {"510736", "10.0.1.3:11211"}, + {"510969", "10.0.1.6:11211"}, + {"511202", "10.0.1.2:11211"}, + {"511435", "10.0.1.2:11211"}, + {"511668", "10.0.1.4:11211"}, + {"511901", "10.0.1.6:11211"}, + {"512134", "10.0.1.2:11211"}, + {"512367", "10.0.1.1:11211"}, + {"512600", "10.0.1.1:11211"}, + {"512833", "10.0.1.3:11211"}, + {"513066", "10.0.1.3:11211"}, + {"513299", "10.0.1.3:11211"}, + {"513532", "10.0.1.2:11211"}, + {"513765", "10.0.1.8:11211"}, + {"513998", "10.0.1.7:11211"}, + {"514231", "10.0.1.6:11211"}, + {"514464", "10.0.1.3:11211"}, + {"514697", "10.0.1.3:11211"}, + {"514930", "10.0.1.6:11211"}, + {"515163", "10.0.1.7:11211"}, + {"515396", "10.0.1.8:11211"}, + {"515629", "10.0.1.5:11211"}, + {"515862", "10.0.1.7:11211"}, + {"516095", "10.0.1.2:11211"}, + {"516328", 
"10.0.1.8:11211"}, + {"516561", "10.0.1.3:11211"}, + {"516794", "10.0.1.5:11211"}, + {"517027", "10.0.1.6:11211"}, + {"517260", "10.0.1.3:11211"}, + {"517493", "10.0.1.5:11211"}, + {"517726", "10.0.1.8:11211"}, + {"517959", "10.0.1.7:11211"}, + {"518192", "10.0.1.8:11211"}, + {"518425", "10.0.1.8:11211"}, + {"518658", "10.0.1.8:11211"}, + {"518891", "10.0.1.7:11211"}, + {"519124", "10.0.1.3:11211"}, + {"519357", "10.0.1.6:11211"}, + {"519590", "10.0.1.5:11211"}, + {"519823", "10.0.1.3:11211"}, + {"520056", "10.0.1.2:11211"}, + {"520289", "10.0.1.8:11211"}, + {"520522", "10.0.1.5:11211"}, + {"533337", "10.0.1.3:11211"}, + {"533570", "10.0.1.3:11211"}, + {"533803", "10.0.1.2:11211"}, + {"534036", "10.0.1.4:11211"}, + {"534269", "10.0.1.1:11211"}, + {"534502", "10.0.1.2:11211"}, + {"534735", "10.0.1.4:11211"}, + {"534968", "10.0.1.8:11211"}, + {"535201", "10.0.1.1:11211"}, + {"535434", "10.0.1.1:11211"}, + {"535667", "10.0.1.3:11211"}, + {"535900", "10.0.1.7:11211"}, + {"536133", "10.0.1.8:11211"}, + {"541026", "10.0.1.1:11211"}, + {"541259", "10.0.1.2:11211"}, + {"541492", "10.0.1.6:11211"}, + {"541725", "10.0.1.8:11211"}, + {"541958", "10.0.1.1:11211"}, + {"542191", "10.0.1.6:11211"}, + {"542424", "10.0.1.7:11211"}, + {"542657", "10.0.1.2:11211"}, + {"542890", "10.0.1.4:11211"}, + {"543123", "10.0.1.5:11211"}, + {"543356", "10.0.1.3:11211"}, + {"543589", "10.0.1.7:11211"}, + {"547084", "10.0.1.2:11211"}, + {"547317", "10.0.1.3:11211"}, + {"547550", "10.0.1.7:11211"}, + {"547783", "10.0.1.7:11211"}, + {"548016", "10.0.1.2:11211"}, + {"548249", "10.0.1.5:11211"}, + {"548482", "10.0.1.7:11211"}, + {"548715", "10.0.1.6:11211"}, + {"548948", "10.0.1.4:11211"}, + {"549181", "10.0.1.3:11211"}, + {"549414", "10.0.1.3:11211"}, + {"549647", "10.0.1.4:11211"}, + {"549880", "10.0.1.7:11211"}, + {"550113", "10.0.1.6:11211"}, + {"550346", "10.0.1.4:11211"}, + {"550579", "10.0.1.6:11211"}, + {"550812", "10.0.1.4:11211"}, + {"551045", "10.0.1.5:11211"}, + {"551278", 
"10.0.1.6:11211"}, + {"551511", "10.0.1.5:11211"}, + {"551744", "10.0.1.6:11211"}, + {"551977", "10.0.1.8:11211"}, + {"552210", "10.0.1.2:11211"}, + {"552443", "10.0.1.2:11211"}, + {"552676", "10.0.1.1:11211"}, + {"552909", "10.0.1.4:11211"}, + {"553142", "10.0.1.7:11211"}, + {"553375", "10.0.1.2:11211"}, + {"553608", "10.0.1.5:11211"}, + {"553841", "10.0.1.5:11211"}, + {"554074", "10.0.1.5:11211"}, + {"554307", "10.0.1.7:11211"}, + {"554540", "10.0.1.6:11211"}, + {"554773", "10.0.1.3:11211"}, + {"555006", "10.0.1.3:11211"}, + {"555239", "10.0.1.5:11211"}, + {"555472", "10.0.1.8:11211"}, + {"555705", "10.0.1.8:11211"}, + {"555938", "10.0.1.6:11211"}, + {"556171", "10.0.1.4:11211"}, + {"556404", "10.0.1.4:11211"}, + {"556637", "10.0.1.8:11211"}, + {"556870", "10.0.1.5:11211"}, + {"557103", "10.0.1.3:11211"}, + {"557336", "10.0.1.3:11211"}, + {"557569", "10.0.1.8:11211"}, + {"557802", "10.0.1.1:11211"}, + {"558035", "10.0.1.1:11211"}, + {"558268", "10.0.1.4:11211"}, + {"558501", "10.0.1.3:11211"}, + {"558734", "10.0.1.6:11211"}, + {"558967", "10.0.1.7:11211"}, + {"559200", "10.0.1.3:11211"}, + {"559433", "10.0.1.1:11211"}, + {"559666", "10.0.1.4:11211"}, + {"559899", "10.0.1.2:11211"}, + {"560132", "10.0.1.2:11211"}, + {"560365", "10.0.1.6:11211"}, + {"560598", "10.0.1.8:11211"}, + {"560831", "10.0.1.3:11211"}, + {"561064", "10.0.1.7:11211"}, + {"561297", "10.0.1.1:11211"}, + {"561530", "10.0.1.7:11211"}, + {"561763", "10.0.1.7:11211"}, + {"561996", "10.0.1.1:11211"}, + {"562229", "10.0.1.8:11211"}, + {"562462", "10.0.1.6:11211"}, + {"562695", "10.0.1.7:11211"}, + {"562928", "10.0.1.2:11211"}, + {"563161", "10.0.1.8:11211"}, + {"563394", "10.0.1.8:11211"}, + {"563627", "10.0.1.6:11211"}, + {"563860", "10.0.1.2:11211"}, + {"564093", "10.0.1.5:11211"}, + {"564326", "10.0.1.4:11211"}, + {"564559", "10.0.1.8:11211"}, + {"564792", "10.0.1.1:11211"}, + {"565025", "10.0.1.3:11211"}, + {"565258", "10.0.1.7:11211"}, + {"565491", "10.0.1.7:11211"}, + {"565724", 
"10.0.1.5:11211"}, + {"565957", "10.0.1.4:11211"}, + {"566190", "10.0.1.4:11211"}, + {"566423", "10.0.1.8:11211"}, + {"566656", "10.0.1.4:11211"}, + {"566889", "10.0.1.3:11211"}, + {"567122", "10.0.1.1:11211"}, + {"567355", "10.0.1.8:11211"}, + {"567588", "10.0.1.6:11211"}, + {"567821", "10.0.1.3:11211"}, + {"568054", "10.0.1.5:11211"}, + {"568287", "10.0.1.8:11211"}, + {"568520", "10.0.1.8:11211"}, + {"568753", "10.0.1.4:11211"}, + {"568986", "10.0.1.5:11211"}, + {"569219", "10.0.1.8:11211"}, + {"569452", "10.0.1.6:11211"}, + {"569685", "10.0.1.1:11211"}, + {"569918", "10.0.1.3:11211"}, + {"570151", "10.0.1.4:11211"}, + {"570384", "10.0.1.4:11211"}, + {"570617", "10.0.1.8:11211"}, + {"570850", "10.0.1.5:11211"}, + {"571083", "10.0.1.8:11211"}, + {"571316", "10.0.1.3:11211"}, + {"571549", "10.0.1.3:11211"}, + {"571782", "10.0.1.2:11211"}, + {"572015", "10.0.1.6:11211"}, + {"572248", "10.0.1.3:11211"}, + {"572481", "10.0.1.7:11211"}, + {"572714", "10.0.1.6:11211"}, + {"572947", "10.0.1.8:11211"}, + {"573180", "10.0.1.7:11211"}, + {"573413", "10.0.1.1:11211"}, + {"573646", "10.0.1.1:11211"}, + {"573879", "10.0.1.1:11211"}, + {"574112", "10.0.1.1:11211"}, + {"574345", "10.0.1.1:11211"}, + {"574578", "10.0.1.6:11211"}, + {"574811", "10.0.1.5:11211"}, + {"575044", "10.0.1.2:11211"}, + {"575277", "10.0.1.1:11211"}, + {"575510", "10.0.1.1:11211"}, + {"575743", "10.0.1.5:11211"}, + {"575976", "10.0.1.4:11211"}, + {"576209", "10.0.1.6:11211"}, + {"576442", "10.0.1.5:11211"}, + {"576675", "10.0.1.2:11211"}, + {"576908", "10.0.1.3:11211"}, + {"577141", "10.0.1.7:11211"}, + {"577374", "10.0.1.5:11211"}, + {"577607", "10.0.1.5:11211"}, + {"577840", "10.0.1.5:11211"}, + {"578073", "10.0.1.1:11211"}, + {"578306", "10.0.1.2:11211"}, + {"578539", "10.0.1.7:11211"}, + {"578772", "10.0.1.5:11211"}, + {"579005", "10.0.1.6:11211"}, + {"579238", "10.0.1.4:11211"}, + {"579471", "10.0.1.2:11211"}, + {"579704", "10.0.1.7:11211"}, + {"579937", "10.0.1.6:11211"}, + {"580170", 
"10.0.1.4:11211"}, + {"585063", "10.0.1.1:11211"}, + {"585296", "10.0.1.6:11211"}, + {"585529", "10.0.1.4:11211"}, + {"585762", "10.0.1.4:11211"}, + {"585995", "10.0.1.2:11211"}, + {"586228", "10.0.1.7:11211"}, + {"586461", "10.0.1.8:11211"}, + {"586694", "10.0.1.3:11211"}, + {"586927", "10.0.1.7:11211"}, + {"587160", "10.0.1.4:11211"}, + {"587393", "10.0.1.8:11211"}, + {"587626", "10.0.1.7:11211"}, + {"587859", "10.0.1.7:11211"}, + {"588092", "10.0.1.1:11211"}, + {"588325", "10.0.1.8:11211"}, + {"588558", "10.0.1.3:11211"}, + {"588791", "10.0.1.1:11211"}, + {"589024", "10.0.1.5:11211"}, + {"589257", "10.0.1.4:11211"}, + {"589490", "10.0.1.1:11211"}, + {"589723", "10.0.1.8:11211"}, + {"589956", "10.0.1.8:11211"}, + {"590189", "10.0.1.1:11211"}, + {"590422", "10.0.1.8:11211"}, + {"590655", "10.0.1.4:11211"}, + {"590888", "10.0.1.8:11211"}, + {"591121", "10.0.1.1:11211"}, + {"591354", "10.0.1.6:11211"}, + {"591587", "10.0.1.7:11211"}, + {"591820", "10.0.1.2:11211"}, + {"592053", "10.0.1.7:11211"}, + {"592286", "10.0.1.8:11211"}, + {"592519", "10.0.1.8:11211"}, + {"592752", "10.0.1.5:11211"}, + {"592985", "10.0.1.5:11211"}, + {"593218", "10.0.1.5:11211"}, + {"593451", "10.0.1.8:11211"}, + {"593684", "10.0.1.2:11211"}, + {"593917", "10.0.1.3:11211"}, + {"594150", "10.0.1.5:11211"}, + {"594383", "10.0.1.8:11211"}, + {"594616", "10.0.1.8:11211"}, + {"594849", "10.0.1.2:11211"}, + {"595082", "10.0.1.3:11211"}, + {"595315", "10.0.1.7:11211"}, + {"595548", "10.0.1.8:11211"}, + {"595781", "10.0.1.2:11211"}, + {"596014", "10.0.1.5:11211"}, + {"596247", "10.0.1.3:11211"}, + {"596480", "10.0.1.3:11211"}, + {"596713", "10.0.1.3:11211"}, + {"596946", "10.0.1.8:11211"}, + {"597179", "10.0.1.1:11211"}, + {"597412", "10.0.1.4:11211"}, + {"597645", "10.0.1.3:11211"}, + {"597878", "10.0.1.4:11211"}, + {"598111", "10.0.1.3:11211"}, + {"598344", "10.0.1.3:11211"}, + {"598577", "10.0.1.7:11211"}, + {"598810", "10.0.1.3:11211"}, + {"599043", "10.0.1.1:11211"}, + {"599276", 
"10.0.1.7:11211"}, + {"599509", "10.0.1.5:11211"}, + {"599742", "10.0.1.7:11211"}, + {"599975", "10.0.1.3:11211"}, + {"600208", "10.0.1.4:11211"}, + {"600441", "10.0.1.1:11211"}, + {"600674", "10.0.1.8:11211"}, + {"600907", "10.0.1.6:11211"}, + {"601140", "10.0.1.7:11211"}, + {"601373", "10.0.1.2:11211"}, + {"601606", "10.0.1.5:11211"}, + {"601839", "10.0.1.6:11211"}, + {"602072", "10.0.1.2:11211"}, + {"602305", "10.0.1.3:11211"}, + {"602538", "10.0.1.3:11211"}, + {"602771", "10.0.1.3:11211"}, + {"603004", "10.0.1.3:11211"}, + {"603237", "10.0.1.8:11211"}, + {"603470", "10.0.1.5:11211"}, + {"603703", "10.0.1.7:11211"}, + {"603936", "10.0.1.4:11211"}, + {"604169", "10.0.1.7:11211"}, + {"604402", "10.0.1.2:11211"}, + {"604635", "10.0.1.3:11211"}, + {"604868", "10.0.1.5:11211"}, + {"605101", "10.0.1.5:11211"}, + {"614887", "10.0.1.2:11211"}, + {"615120", "10.0.1.2:11211"}, + {"615353", "10.0.1.8:11211"}, + {"615586", "10.0.1.6:11211"}, + {"615819", "10.0.1.5:11211"}, + {"616052", "10.0.1.3:11211"}, + {"616285", "10.0.1.1:11211"}, + {"616518", "10.0.1.8:11211"}, + {"616751", "10.0.1.5:11211"}, + {"616984", "10.0.1.8:11211"}, + {"617217", "10.0.1.3:11211"}, + {"617450", "10.0.1.4:11211"}, + {"617683", "10.0.1.1:11211"}, + {"617916", "10.0.1.7:11211"}, + {"618149", "10.0.1.8:11211"}, + {"618382", "10.0.1.7:11211"}, + {"618615", "10.0.1.7:11211"}, + {"618848", "10.0.1.4:11211"}, + {"619081", "10.0.1.1:11211"}, + {"619314", "10.0.1.8:11211"}, + {"619547", "10.0.1.3:11211"}, + {"619780", "10.0.1.1:11211"}, + {"620013", "10.0.1.7:11211"}, + {"620246", "10.0.1.5:11211"}, + {"620479", "10.0.1.3:11211"}, + {"620712", "10.0.1.7:11211"}, + {"625139", "10.0.1.1:11211"}, + {"625372", "10.0.1.8:11211"}, + {"625605", "10.0.1.7:11211"}, + {"625838", "10.0.1.2:11211"}, + {"626071", "10.0.1.5:11211"}, + {"626304", "10.0.1.3:11211"}, + {"626537", "10.0.1.5:11211"}, + {"626770", "10.0.1.1:11211"}, + {"627003", "10.0.1.8:11211"}, + {"627236", "10.0.1.4:11211"}, + {"627469", 
"10.0.1.4:11211"}, + {"627702", "10.0.1.8:11211"}, + {"627935", "10.0.1.1:11211"}, + {"628168", "10.0.1.6:11211"}, + {"628401", "10.0.1.4:11211"}, + {"628634", "10.0.1.4:11211"}, + {"628867", "10.0.1.3:11211"}, + {"629100", "10.0.1.5:11211"}, + {"629333", "10.0.1.4:11211"}, + {"629566", "10.0.1.4:11211"}, + {"629799", "10.0.1.2:11211"}, + {"630032", "10.0.1.5:11211"}, + {"630265", "10.0.1.8:11211"}, + {"630498", "10.0.1.3:11211"}, + {"630731", "10.0.1.6:11211"}, + {"630964", "10.0.1.6:11211"}, + {"631197", "10.0.1.2:11211"}, + {"631430", "10.0.1.3:11211"}, + {"631663", "10.0.1.7:11211"}, + {"631896", "10.0.1.4:11211"}, + {"632129", "10.0.1.2:11211"}, + {"632362", "10.0.1.7:11211"}, + {"632595", "10.0.1.6:11211"}, + {"632828", "10.0.1.3:11211"}, + {"633061", "10.0.1.8:11211"}, + {"633294", "10.0.1.5:11211"}, + {"633527", "10.0.1.3:11211"}, + {"633760", "10.0.1.6:11211"}, + {"633993", "10.0.1.7:11211"}, + {"634226", "10.0.1.2:11211"}, + {"634459", "10.0.1.1:11211"}, + {"634692", "10.0.1.2:11211"}, + {"634925", "10.0.1.6:11211"}, + {"635158", "10.0.1.4:11211"}, + {"635391", "10.0.1.4:11211"}, + {"635624", "10.0.1.4:11211"}, + {"635857", "10.0.1.5:11211"}, + {"636090", "10.0.1.2:11211"}, + {"636323", "10.0.1.3:11211"}, + {"636556", "10.0.1.1:11211"}, + {"636789", "10.0.1.4:11211"}, + {"637022", "10.0.1.1:11211"}, + {"637255", "10.0.1.4:11211"}, + {"637488", "10.0.1.3:11211"}, + {"637721", "10.0.1.3:11211"}, + {"637954", "10.0.1.2:11211"}, + {"638187", "10.0.1.2:11211"}, + {"638420", "10.0.1.5:11211"}, + {"638653", "10.0.1.5:11211"}, + {"638886", "10.0.1.2:11211"}, + {"639119", "10.0.1.6:11211"}, + {"639352", "10.0.1.6:11211"}, + {"639585", "10.0.1.2:11211"}, + {"639818", "10.0.1.7:11211"}, + {"640051", "10.0.1.3:11211"}, + {"640284", "10.0.1.3:11211"}, + {"640517", "10.0.1.7:11211"}, + {"640750", "10.0.1.4:11211"}, + {"640983", "10.0.1.3:11211"}, + {"641216", "10.0.1.2:11211"}, + {"641449", "10.0.1.1:11211"}, + {"641682", "10.0.1.4:11211"}, + {"641915", 
"10.0.1.5:11211"}, + {"642148", "10.0.1.1:11211"}, + {"642381", "10.0.1.2:11211"}, + {"642614", "10.0.1.7:11211"}, + {"642847", "10.0.1.7:11211"}, + {"643080", "10.0.1.3:11211"}, + {"643313", "10.0.1.6:11211"}, + {"643546", "10.0.1.4:11211"}, + {"643779", "10.0.1.3:11211"}, + {"644012", "10.0.1.5:11211"}, + {"644245", "10.0.1.3:11211"}, + {"644478", "10.0.1.8:11211"}, + {"644711", "10.0.1.8:11211"}, + {"644944", "10.0.1.2:11211"}, + {"645177", "10.0.1.8:11211"}, + {"645410", "10.0.1.6:11211"}, + {"645643", "10.0.1.2:11211"}, + {"645876", "10.0.1.4:11211"}, + {"646109", "10.0.1.4:11211"}, + {"646342", "10.0.1.1:11211"}, + {"646575", "10.0.1.3:11211"}, + {"646808", "10.0.1.3:11211"}, + {"647041", "10.0.1.4:11211"}, + {"647274", "10.0.1.5:11211"}, + {"647507", "10.0.1.7:11211"}, + {"647740", "10.0.1.1:11211"}, + {"647973", "10.0.1.7:11211"}, + {"648206", "10.0.1.7:11211"}, + {"648439", "10.0.1.8:11211"}, + {"648672", "10.0.1.4:11211"}, + {"648905", "10.0.1.4:11211"}, + {"649138", "10.0.1.5:11211"}, + {"649371", "10.0.1.6:11211"}, + {"649604", "10.0.1.3:11211"}, + {"649837", "10.0.1.7:11211"}, + {"650070", "10.0.1.5:11211"}, + {"650303", "10.0.1.4:11211"}, + {"650536", "10.0.1.8:11211"}, + {"650769", "10.0.1.8:11211"}, + {"651002", "10.0.1.8:11211"}, + {"651235", "10.0.1.4:11211"}, + {"651468", "10.0.1.1:11211"}, + {"651701", "10.0.1.3:11211"}, + {"651934", "10.0.1.4:11211"}, + {"652167", "10.0.1.4:11211"}, + {"652400", "10.0.1.7:11211"}, + {"652633", "10.0.1.4:11211"}, + {"652866", "10.0.1.3:11211"}, + {"653099", "10.0.1.1:11211"}, + {"653332", "10.0.1.8:11211"}, + {"653565", "10.0.1.2:11211"}, + {"653798", "10.0.1.2:11211"}, + {"654031", "10.0.1.6:11211"}, + {"654264", "10.0.1.2:11211"}, + {"654497", "10.0.1.4:11211"}, + {"654730", "10.0.1.7:11211"}, + {"654963", "10.0.1.8:11211"}, + {"655196", "10.0.1.1:11211"}, + {"655429", "10.0.1.8:11211"}, + {"655662", "10.0.1.5:11211"}, + {"655895", "10.0.1.7:11211"}, + {"656128", "10.0.1.3:11211"}, + {"656361", 
"10.0.1.8:11211"}, + {"656594", "10.0.1.5:11211"}, + {"656827", "10.0.1.6:11211"}, + {"657060", "10.0.1.3:11211"}, + {"657293", "10.0.1.2:11211"}, + {"657526", "10.0.1.5:11211"}, + {"657759", "10.0.1.2:11211"}, + {"657992", "10.0.1.4:11211"}, + {"658225", "10.0.1.5:11211"}, + {"658458", "10.0.1.8:11211"}, + {"658691", "10.0.1.1:11211"}, + {"658924", "10.0.1.4:11211"}, + {"659157", "10.0.1.2:11211"}, + {"659390", "10.0.1.2:11211"}, + {"659623", "10.0.1.2:11211"}, + {"659856", "10.0.1.3:11211"}, + {"660089", "10.0.1.8:11211"}, + {"660322", "10.0.1.7:11211"}, + {"660555", "10.0.1.4:11211"}, + {"660788", "10.0.1.7:11211"}, + {"661021", "10.0.1.1:11211"}, + {"661254", "10.0.1.7:11211"}, + {"661487", "10.0.1.2:11211"}, + {"661720", "10.0.1.5:11211"}, + {"661953", "10.0.1.7:11211"}, + {"662186", "10.0.1.1:11211"}, + {"662419", "10.0.1.1:11211"}, + {"662652", "10.0.1.3:11211"}, + {"662885", "10.0.1.7:11211"}, + {"677564", "10.0.1.5:11211"}, + {"677797", "10.0.1.7:11211"}, + {"678030", "10.0.1.6:11211"}, + {"678263", "10.0.1.8:11211"}, + {"678496", "10.0.1.8:11211"}, + {"678729", "10.0.1.6:11211"}, + {"678962", "10.0.1.1:11211"}, + {"679195", "10.0.1.7:11211"}, + {"679428", "10.0.1.6:11211"}, + {"679661", "10.0.1.5:11211"}, + {"679894", "10.0.1.8:11211"}, + {"680127", "10.0.1.5:11211"}, + {"680360", "10.0.1.1:11211"}, + {"680593", "10.0.1.8:11211"}, + {"680826", "10.0.1.7:11211"}, + {"681059", "10.0.1.5:11211"}, + {"681292", "10.0.1.7:11211"}, + {"681525", "10.0.1.3:11211"}, + {"681758", "10.0.1.3:11211"}, + {"685952", "10.0.1.4:11211"}, + {"686185", "10.0.1.6:11211"}, + {"686418", "10.0.1.5:11211"}, + {"686651", "10.0.1.3:11211"}, + {"686884", "10.0.1.8:11211"}, + {"687117", "10.0.1.6:11211"}, + {"687350", "10.0.1.4:11211"}, + {"687583", "10.0.1.8:11211"}, + {"687816", "10.0.1.7:11211"}, + {"688049", "10.0.1.7:11211"}, + {"688282", "10.0.1.5:11211"}, + {"688515", "10.0.1.6:11211"}, + {"688748", "10.0.1.4:11211"}, + {"688981", "10.0.1.1:11211"}, + {"689214", 
"10.0.1.5:11211"}, + {"689447", "10.0.1.3:11211"}, + {"689680", "10.0.1.8:11211"}, + {"689913", "10.0.1.8:11211"}, + {"690146", "10.0.1.6:11211"}, + {"690379", "10.0.1.8:11211"}, + {"690612", "10.0.1.2:11211"}, + {"690845", "10.0.1.6:11211"}, + {"691078", "10.0.1.2:11211"}, + {"691311", "10.0.1.7:11211"}, + {"691544", "10.0.1.8:11211"}, + {"691777", "10.0.1.5:11211"}, + {"692010", "10.0.1.7:11211"}, + {"692243", "10.0.1.4:11211"}, + {"692476", "10.0.1.3:11211"}, + {"692709", "10.0.1.3:11211"}, + {"692942", "10.0.1.1:11211"}, + {"693175", "10.0.1.5:11211"}, + {"693408", "10.0.1.5:11211"}, + {"693641", "10.0.1.6:11211"}, + {"693874", "10.0.1.3:11211"}, + {"694107", "10.0.1.3:11211"}, + {"694340", "10.0.1.5:11211"}, + {"694573", "10.0.1.4:11211"}, + {"694806", "10.0.1.6:11211"}, + {"695039", "10.0.1.1:11211"}, + {"695272", "10.0.1.6:11211"}, + {"695505", "10.0.1.3:11211"}, + {"695738", "10.0.1.6:11211"}, + {"695971", "10.0.1.8:11211"}, + {"696204", "10.0.1.2:11211"}, + {"696437", "10.0.1.1:11211"}, + {"696670", "10.0.1.6:11211"}, + {"696903", "10.0.1.7:11211"}, + {"697136", "10.0.1.2:11211"}, + {"697369", "10.0.1.6:11211"}, + {"697602", "10.0.1.2:11211"}, + {"697835", "10.0.1.4:11211"}, + {"698068", "10.0.1.8:11211"}, + {"698301", "10.0.1.5:11211"}, + {"698534", "10.0.1.4:11211"}, + {"698767", "10.0.1.6:11211"}, + {"699000", "10.0.1.7:11211"}, + {"699233", "10.0.1.4:11211"}, + {"699466", "10.0.1.5:11211"}, + {"699699", "10.0.1.3:11211"}, + {"699932", "10.0.1.2:11211"}, + {"700165", "10.0.1.6:11211"}, + {"700398", "10.0.1.1:11211"}, + {"700631", "10.0.1.1:11211"}, + {"700864", "10.0.1.3:11211"}, + {"701097", "10.0.1.4:11211"}, + {"701330", "10.0.1.7:11211"}, + {"701563", "10.0.1.6:11211"}, + {"701796", "10.0.1.8:11211"}, + {"702029", "10.0.1.4:11211"}, + {"702262", "10.0.1.4:11211"}, + {"702495", "10.0.1.5:11211"}, + {"702728", "10.0.1.7:11211"}, + {"702961", "10.0.1.4:11211"}, + {"703194", "10.0.1.5:11211"}, + {"703427", "10.0.1.7:11211"}, + {"703660", 
"10.0.1.1:11211"}, + {"703893", "10.0.1.6:11211"}, + {"704126", "10.0.1.4:11211"}, + {"704359", "10.0.1.1:11211"}, + {"704592", "10.0.1.4:11211"}, + {"704825", "10.0.1.2:11211"}, + {"705058", "10.0.1.8:11211"}, + {"705291", "10.0.1.6:11211"}, + {"705524", "10.0.1.3:11211"}, + {"705757", "10.0.1.7:11211"}, + {"705990", "10.0.1.7:11211"}, + {"706223", "10.0.1.8:11211"}, + {"706456", "10.0.1.7:11211"}, + {"706689", "10.0.1.8:11211"}, + {"706922", "10.0.1.7:11211"}, + {"707155", "10.0.1.8:11211"}, + {"707388", "10.0.1.4:11211"}, + {"707621", "10.0.1.8:11211"}, + {"707854", "10.0.1.8:11211"}, + {"708087", "10.0.1.7:11211"}, + {"708320", "10.0.1.5:11211"}, + {"708553", "10.0.1.6:11211"}, + {"708786", "10.0.1.3:11211"}, + {"709019", "10.0.1.7:11211"}, + {"709252", "10.0.1.4:11211"}, + {"709485", "10.0.1.8:11211"}, + {"709718", "10.0.1.1:11211"}, + {"709951", "10.0.1.3:11211"}, + {"710184", "10.0.1.1:11211"}, + {"710417", "10.0.1.6:11211"}, + {"710650", "10.0.1.1:11211"}, + {"710883", "10.0.1.7:11211"}, + {"711116", "10.0.1.4:11211"}, + {"711349", "10.0.1.7:11211"}, + {"711582", "10.0.1.1:11211"}, + {"711815", "10.0.1.7:11211"}, + {"712048", "10.0.1.6:11211"}, + {"712281", "10.0.1.7:11211"}, + {"712514", "10.0.1.2:11211"}, + {"712747", "10.0.1.3:11211"}, + {"712980", "10.0.1.1:11211"}, + {"713213", "10.0.1.8:11211"}, + {"713446", "10.0.1.2:11211"}, + {"713679", "10.0.1.8:11211"}, + {"713912", "10.0.1.3:11211"}, + {"714145", "10.0.1.3:11211"}, + {"714378", "10.0.1.6:11211"}, + {"714611", "10.0.1.8:11211"}, + {"714844", "10.0.1.3:11211"}, + {"715077", "10.0.1.8:11211"}, + {"715310", "10.0.1.3:11211"}, + {"715543", "10.0.1.7:11211"}, + {"715776", "10.0.1.2:11211"}, + {"716009", "10.0.1.5:11211"}, + {"716242", "10.0.1.7:11211"}, + {"716475", "10.0.1.8:11211"}, + {"716708", "10.0.1.7:11211"}, + {"716941", "10.0.1.6:11211"}, + {"717174", "10.0.1.3:11211"}, + {"717407", "10.0.1.2:11211"}, + {"717640", "10.0.1.3:11211"}, + {"717873", "10.0.1.3:11211"}, + {"718106", 
"10.0.1.2:11211"}, + {"718339", "10.0.1.8:11211"}, + {"718572", "10.0.1.5:11211"}, + {"718805", "10.0.1.5:11211"}, + {"719038", "10.0.1.8:11211"}, + {"719271", "10.0.1.5:11211"}, + {"719504", "10.0.1.3:11211"}, + {"719737", "10.0.1.5:11211"}, + {"719970", "10.0.1.6:11211"}, + {"720203", "10.0.1.7:11211"}, + {"720436", "10.0.1.2:11211"}, + {"720669", "10.0.1.7:11211"}, + {"720902", "10.0.1.7:11211"}, + {"721135", "10.0.1.2:11211"}, + {"721368", "10.0.1.1:11211"}, + {"721601", "10.0.1.2:11211"}, + {"721834", "10.0.1.3:11211"}, + {"722067", "10.0.1.5:11211"}, + {"722300", "10.0.1.8:11211"}, + {"722533", "10.0.1.1:11211"}, + {"722766", "10.0.1.4:11211"}, + {"722999", "10.0.1.8:11211"}, + {"723232", "10.0.1.8:11211"}, + {"723465", "10.0.1.8:11211"}, + {"723698", "10.0.1.2:11211"}, + {"723931", "10.0.1.6:11211"}, + {"724164", "10.0.1.5:11211"}, + {"724397", "10.0.1.8:11211"}, + {"724630", "10.0.1.5:11211"}, + {"724863", "10.0.1.4:11211"}, + {"725096", "10.0.1.4:11211"}, + {"725329", "10.0.1.8:11211"}, + {"725562", "10.0.1.1:11211"}, + {"729290", "10.0.1.7:11211"}, + {"729523", "10.0.1.1:11211"}, + {"729756", "10.0.1.1:11211"}, + {"729989", "10.0.1.5:11211"}, + {"730222", "10.0.1.3:11211"}, + {"730455", "10.0.1.3:11211"}, + {"730688", "10.0.1.6:11211"}, + {"730921", "10.0.1.1:11211"}, + {"731154", "10.0.1.4:11211"}, + {"731387", "10.0.1.1:11211"}, + {"731620", "10.0.1.7:11211"}, + {"731853", "10.0.1.3:11211"}, + {"732086", "10.0.1.7:11211"}, + {"732319", "10.0.1.7:11211"}, + {"732552", "10.0.1.6:11211"}, + {"732785", "10.0.1.2:11211"}, + {"733018", "10.0.1.5:11211"}, + {"733251", "10.0.1.8:11211"}, + {"733484", "10.0.1.3:11211"}, + {"733717", "10.0.1.5:11211"}, + {"733950", "10.0.1.5:11211"}, + {"734183", "10.0.1.1:11211"}, + {"734416", "10.0.1.7:11211"}, + {"734649", "10.0.1.4:11211"}, + {"734882", "10.0.1.8:11211"}, + {"735115", "10.0.1.4:11211"}, + {"735348", "10.0.1.1:11211"}, + {"735581", "10.0.1.7:11211"}, + {"735814", "10.0.1.2:11211"}, + {"736047", 
"10.0.1.1:11211"}, + {"736280", "10.0.1.3:11211"}, + {"736513", "10.0.1.3:11211"}, + {"736746", "10.0.1.6:11211"}, + {"736979", "10.0.1.3:11211"}, + {"737212", "10.0.1.6:11211"}, + {"737445", "10.0.1.4:11211"}, + {"737678", "10.0.1.4:11211"}, + {"737911", "10.0.1.4:11211"}, + {"740474", "10.0.1.7:11211"}, + {"740707", "10.0.1.2:11211"}, + {"740940", "10.0.1.1:11211"}, + {"741173", "10.0.1.2:11211"}, + {"741406", "10.0.1.5:11211"}, + {"741639", "10.0.1.8:11211"}, + {"741872", "10.0.1.5:11211"}, + {"742105", "10.0.1.6:11211"}, + {"742338", "10.0.1.7:11211"}, + {"742571", "10.0.1.1:11211"}, + {"742804", "10.0.1.7:11211"}, + {"743037", "10.0.1.7:11211"}, + {"743270", "10.0.1.1:11211"}, + {"743503", "10.0.1.1:11211"}, + {"743736", "10.0.1.5:11211"}, + {"743969", "10.0.1.6:11211"}, + {"744202", "10.0.1.7:11211"}, + {"744435", "10.0.1.2:11211"}, + {"744668", "10.0.1.2:11211"}, + {"744901", "10.0.1.5:11211"}, + {"745134", "10.0.1.1:11211"}, + {"745367", "10.0.1.7:11211"}, + {"745600", "10.0.1.7:11211"}, + {"745833", "10.0.1.7:11211"}, + {"746066", "10.0.1.6:11211"}, + {"746299", "10.0.1.5:11211"}, + {"746532", "10.0.1.3:11211"}, + {"746765", "10.0.1.6:11211"}, + {"746998", "10.0.1.3:11211"}, + {"747231", "10.0.1.7:11211"}, + {"747464", "10.0.1.1:11211"}, + {"747697", "10.0.1.6:11211"}, + {"747930", "10.0.1.3:11211"}, + {"748163", "10.0.1.6:11211"}, + {"748396", "10.0.1.7:11211"}, + {"748629", "10.0.1.6:11211"}, + {"748862", "10.0.1.2:11211"}, + {"749095", "10.0.1.4:11211"}, + {"749328", "10.0.1.7:11211"}, + {"749561", "10.0.1.4:11211"}, + {"749794", "10.0.1.5:11211"}, + {"750027", "10.0.1.3:11211"}, + {"750260", "10.0.1.6:11211"}, + {"750493", "10.0.1.5:11211"}, + {"750726", "10.0.1.4:11211"}, + {"750959", "10.0.1.1:11211"}, + {"751192", "10.0.1.1:11211"}, + {"751425", "10.0.1.6:11211"}, + {"751658", "10.0.1.3:11211"}, + {"751891", "10.0.1.6:11211"}, + {"752124", "10.0.1.6:11211"}, + {"752357", "10.0.1.5:11211"}, + {"752590", "10.0.1.3:11211"}, + {"752823", 
"10.0.1.2:11211"}, + {"753056", "10.0.1.1:11211"}, + {"753289", "10.0.1.7:11211"}, + {"753522", "10.0.1.8:11211"}, + {"753755", "10.0.1.3:11211"}, + {"753988", "10.0.1.2:11211"}, + {"754221", "10.0.1.8:11211"}, + {"754454", "10.0.1.1:11211"}, + {"754687", "10.0.1.7:11211"}, + {"754920", "10.0.1.6:11211"}, + {"755153", "10.0.1.2:11211"}, + {"755386", "10.0.1.1:11211"}, + {"755619", "10.0.1.7:11211"}, + {"755852", "10.0.1.3:11211"}, + {"756085", "10.0.1.5:11211"}, + {"756318", "10.0.1.6:11211"}, + {"756551", "10.0.1.3:11211"}, + {"756784", "10.0.1.7:11211"}, + {"757017", "10.0.1.7:11211"}, + {"757250", "10.0.1.2:11211"}, + {"757483", "10.0.1.2:11211"}, + {"757716", "10.0.1.3:11211"}, + {"757949", "10.0.1.2:11211"}, + {"758182", "10.0.1.3:11211"}, + {"758415", "10.0.1.4:11211"}, + {"758648", "10.0.1.6:11211"}, + {"758881", "10.0.1.6:11211"}, + {"759114", "10.0.1.5:11211"}, + {"759347", "10.0.1.8:11211"}, + {"759580", "10.0.1.8:11211"}, + {"759813", "10.0.1.1:11211"}, + {"760046", "10.0.1.1:11211"}, + {"760279", "10.0.1.3:11211"}, + {"760512", "10.0.1.6:11211"}, + {"760745", "10.0.1.8:11211"}, + {"760978", "10.0.1.6:11211"}, + {"761211", "10.0.1.4:11211"}, + {"761444", "10.0.1.7:11211"}, + {"761677", "10.0.1.1:11211"}, + {"761910", "10.0.1.4:11211"}, + {"762143", "10.0.1.7:11211"}, + {"762376", "10.0.1.7:11211"}, + {"762609", "10.0.1.2:11211"}, + {"762842", "10.0.1.6:11211"}, + {"763075", "10.0.1.7:11211"}, + {"763308", "10.0.1.3:11211"}, + {"763541", "10.0.1.7:11211"}, + {"763774", "10.0.1.5:11211"}, + {"764007", "10.0.1.3:11211"}, + {"764240", "10.0.1.6:11211"}, + {"764473", "10.0.1.4:11211"}, + {"764706", "10.0.1.3:11211"}, + {"764939", "10.0.1.2:11211"}, + {"765172", "10.0.1.8:11211"}, + {"765405", "10.0.1.2:11211"}, + {"765638", "10.0.1.6:11211"}, + {"765871", "10.0.1.1:11211"}, + {"766104", "10.0.1.1:11211"}, + {"766337", "10.0.1.5:11211"}, + {"766570", "10.0.1.4:11211"}, + {"766803", "10.0.1.5:11211"}, + {"767036", "10.0.1.6:11211"}, + {"767269", 
"10.0.1.3:11211"}, + {"767502", "10.0.1.5:11211"}, + {"767735", "10.0.1.2:11211"}, + {"767968", "10.0.1.3:11211"}, + {"768201", "10.0.1.7:11211"}, + {"777521", "10.0.1.2:11211"}, + {"777754", "10.0.1.4:11211"}, + {"777987", "10.0.1.2:11211"}, + {"778220", "10.0.1.3:11211"}, + {"778453", "10.0.1.6:11211"}, + {"778686", "10.0.1.8:11211"}, + {"778919", "10.0.1.1:11211"}, + {"779152", "10.0.1.1:11211"}, + {"779385", "10.0.1.3:11211"}, + {"779618", "10.0.1.5:11211"}, + {"779851", "10.0.1.8:11211"}, + {"780084", "10.0.1.7:11211"}, + {"780317", "10.0.1.2:11211"}, + {"780550", "10.0.1.1:11211"}, + {"780783", "10.0.1.2:11211"}, + {"781016", "10.0.1.3:11211"}, + {"781249", "10.0.1.6:11211"}, + {"781482", "10.0.1.1:11211"}, + {"781715", "10.0.1.7:11211"}, + {"781948", "10.0.1.3:11211"}, + {"782181", "10.0.1.6:11211"}, + {"782414", "10.0.1.2:11211"}, + {"782647", "10.0.1.5:11211"}, + {"782880", "10.0.1.5:11211"}, + {"783113", "10.0.1.4:11211"}, + {"783346", "10.0.1.8:11211"}, + {"783579", "10.0.1.8:11211"}, + {"783812", "10.0.1.1:11211"}, + {"784045", "10.0.1.5:11211"}, + {"784278", "10.0.1.1:11211"}, + {"784511", "10.0.1.8:11211"}, + {"784744", "10.0.1.8:11211"}, + {"784977", "10.0.1.1:11211"}, + {"785210", "10.0.1.7:11211"}, + {"785443", "10.0.1.2:11211"}, + {"785676", "10.0.1.8:11211"}, + {"785909", "10.0.1.3:11211"}, + {"786142", "10.0.1.8:11211"}, + {"786375", "10.0.1.8:11211"}, + {"786608", "10.0.1.2:11211"}, + {"786841", "10.0.1.1:11211"}, + {"787074", "10.0.1.8:11211"}, + {"787307", "10.0.1.7:11211"}, + {"787540", "10.0.1.2:11211"}, + {"787773", "10.0.1.3:11211"}, + {"788006", "10.0.1.8:11211"}, + {"788239", "10.0.1.1:11211"}, + {"788472", "10.0.1.2:11211"}, + {"788705", "10.0.1.6:11211"}, + {"788938", "10.0.1.7:11211"}, + {"789171", "10.0.1.7:11211"}, + {"789404", "10.0.1.2:11211"}, + {"789637", "10.0.1.8:11211"}, + {"789870", "10.0.1.3:11211"}, + {"790103", "10.0.1.8:11211"}, + {"790336", "10.0.1.6:11211"}, + {"790569", "10.0.1.5:11211"}, + {"790802", 
"10.0.1.6:11211"}, + {"791035", "10.0.1.7:11211"}, + {"791268", "10.0.1.7:11211"}, + {"791501", "10.0.1.7:11211"}, + {"791734", "10.0.1.8:11211"}, + {"791967", "10.0.1.6:11211"}, + {"792200", "10.0.1.3:11211"}, + {"792433", "10.0.1.6:11211"}, + {"792666", "10.0.1.7:11211"}, + {"792899", "10.0.1.2:11211"}, + {"793132", "10.0.1.1:11211"}, + {"793365", "10.0.1.5:11211"}, + {"793598", "10.0.1.2:11211"}, + {"793831", "10.0.1.6:11211"}, + {"794064", "10.0.1.3:11211"}, + {"794297", "10.0.1.1:11211"}, + {"794530", "10.0.1.1:11211"}, + {"794763", "10.0.1.7:11211"}, + {"794996", "10.0.1.6:11211"}, + {"795229", "10.0.1.5:11211"}, + {"795462", "10.0.1.1:11211"}, + {"795695", "10.0.1.8:11211"}, + {"795928", "10.0.1.4:11211"}, + {"796161", "10.0.1.3:11211"}, + {"796394", "10.0.1.7:11211"}, + {"796627", "10.0.1.5:11211"}, + {"796860", "10.0.1.8:11211"}, + {"797093", "10.0.1.7:11211"}, + {"797326", "10.0.1.3:11211"}, + {"797559", "10.0.1.3:11211"}, + {"797792", "10.0.1.8:11211"}, + {"798025", "10.0.1.4:11211"}, + {"798258", "10.0.1.3:11211"}, + {"798491", "10.0.1.3:11211"}, + {"798724", "10.0.1.8:11211"}, + {"798957", "10.0.1.2:11211"}, + {"799190", "10.0.1.5:11211"}, + {"799423", "10.0.1.2:11211"}, + {"799656", "10.0.1.6:11211"}, + {"799889", "10.0.1.5:11211"}, + {"800122", "10.0.1.5:11211"}, + {"800355", "10.0.1.3:11211"}, + {"800588", "10.0.1.6:11211"}, + {"800821", "10.0.1.5:11211"}, + {"801054", "10.0.1.3:11211"}, + {"801287", "10.0.1.2:11211"}, + {"801520", "10.0.1.2:11211"}, + {"801753", "10.0.1.8:11211"}, + {"801986", "10.0.1.2:11211"}, + {"802219", "10.0.1.1:11211"}, + {"802452", "10.0.1.2:11211"}, + {"802685", "10.0.1.7:11211"}, + {"802918", "10.0.1.8:11211"}, + {"803151", "10.0.1.6:11211"}, + {"803384", "10.0.1.7:11211"}, + {"803617", "10.0.1.7:11211"}, + {"803850", "10.0.1.3:11211"}, + {"804083", "10.0.1.8:11211"}, + {"804316", "10.0.1.8:11211"}, + {"804549", "10.0.1.6:11211"}, + {"804782", "10.0.1.4:11211"}, + {"805015", "10.0.1.7:11211"}, + {"805248", 
"10.0.1.5:11211"}, + {"805481", "10.0.1.2:11211"}, + {"805714", "10.0.1.7:11211"}, + {"805947", "10.0.1.6:11211"}, + {"806180", "10.0.1.2:11211"}, + {"806413", "10.0.1.3:11211"}, + {"806646", "10.0.1.2:11211"}, + {"806879", "10.0.1.5:11211"}, + {"807112", "10.0.1.8:11211"}, + {"807345", "10.0.1.1:11211"}, + {"807578", "10.0.1.7:11211"}, + {"807811", "10.0.1.5:11211"}, + {"808044", "10.0.1.2:11211"}, + {"808277", "10.0.1.7:11211"}, + {"808510", "10.0.1.5:11211"}, + {"808743", "10.0.1.8:11211"}, + {"811772", "10.0.1.5:11211"}, + {"812005", "10.0.1.7:11211"}, + {"812238", "10.0.1.1:11211"}, + {"812471", "10.0.1.2:11211"}, + {"815500", "10.0.1.5:11211"}, + {"815733", "10.0.1.6:11211"}, + {"815966", "10.0.1.8:11211"}, + {"816199", "10.0.1.8:11211"}, + {"816432", "10.0.1.2:11211"}, + {"816665", "10.0.1.4:11211"}, + {"816898", "10.0.1.1:11211"}, + {"817131", "10.0.1.1:11211"}, + {"817364", "10.0.1.7:11211"}, + {"817597", "10.0.1.3:11211"}, + {"817830", "10.0.1.2:11211"}, + {"818063", "10.0.1.7:11211"}, + {"818296", "10.0.1.6:11211"}, + {"818529", "10.0.1.4:11211"}, + {"818762", "10.0.1.3:11211"}, + {"818995", "10.0.1.2:11211"}, + {"819228", "10.0.1.4:11211"}, + {"819461", "10.0.1.7:11211"}, + {"819694", "10.0.1.5:11211"}, + {"819927", "10.0.1.6:11211"}, + {"820160", "10.0.1.5:11211"}, + {"820393", "10.0.1.2:11211"}, + {"820626", "10.0.1.1:11211"}, + {"820859", "10.0.1.6:11211"}, + {"821092", "10.0.1.8:11211"}, + {"821325", "10.0.1.2:11211"}, + {"821558", "10.0.1.6:11211"}, + {"821791", "10.0.1.2:11211"}, + {"822024", "10.0.1.1:11211"}, + {"822257", "10.0.1.3:11211"}, + {"822490", "10.0.1.1:11211"}, + {"822723", "10.0.1.6:11211"}, + {"822956", "10.0.1.1:11211"}, + {"823189", "10.0.1.7:11211"}, + {"823422", "10.0.1.2:11211"}, + {"823655", "10.0.1.3:11211"}, + {"823888", "10.0.1.7:11211"}, + {"824121", "10.0.1.2:11211"}, + {"824354", "10.0.1.6:11211"}, + {"824587", "10.0.1.7:11211"}, + {"824820", "10.0.1.8:11211"}, + {"825053", "10.0.1.5:11211"}, + {"825286", 
"10.0.1.4:11211"}, + {"825519", "10.0.1.8:11211"}, + {"825752", "10.0.1.8:11211"}, + {"825985", "10.0.1.8:11211"}, + {"826218", "10.0.1.8:11211"}, + {"826451", "10.0.1.4:11211"}, + {"826684", "10.0.1.7:11211"}, + {"826917", "10.0.1.5:11211"}, + {"827150", "10.0.1.3:11211"}, + {"827383", "10.0.1.1:11211"}, + {"827616", "10.0.1.8:11211"}, + {"827849", "10.0.1.1:11211"}, + {"828082", "10.0.1.7:11211"}, + {"828315", "10.0.1.7:11211"}, + {"828548", "10.0.1.4:11211"}, + {"828781", "10.0.1.7:11211"}, + {"829014", "10.0.1.5:11211"}, + {"829247", "10.0.1.7:11211"}, + {"829480", "10.0.1.2:11211"}, + {"829713", "10.0.1.4:11211"}, + {"829946", "10.0.1.2:11211"}, + {"830179", "10.0.1.1:11211"}, + {"830412", "10.0.1.3:11211"}, + {"830645", "10.0.1.6:11211"}, + {"830878", "10.0.1.7:11211"}, + {"831111", "10.0.1.1:11211"}, + {"831344", "10.0.1.7:11211"}, + {"831577", "10.0.1.7:11211"}, + {"831810", "10.0.1.1:11211"}, + {"832043", "10.0.1.8:11211"}, + {"832276", "10.0.1.3:11211"}, + {"832509", "10.0.1.3:11211"}, + {"832742", "10.0.1.6:11211"}, + {"832975", "10.0.1.4:11211"}, + {"833208", "10.0.1.1:11211"}, + {"833441", "10.0.1.4:11211"}, + {"833674", "10.0.1.7:11211"}, + {"833907", "10.0.1.3:11211"}, + {"834140", "10.0.1.4:11211"}, + {"834373", "10.0.1.3:11211"}, + {"834606", "10.0.1.7:11211"}, + {"834839", "10.0.1.3:11211"}, + {"835072", "10.0.1.4:11211"}, + {"835305", "10.0.1.5:11211"}, + {"835538", "10.0.1.3:11211"}, + {"835771", "10.0.1.3:11211"}, + {"836004", "10.0.1.6:11211"}, + {"836237", "10.0.1.7:11211"}, + {"836470", "10.0.1.1:11211"}, + {"836703", "10.0.1.7:11211"}, + {"836936", "10.0.1.2:11211"}, + {"837169", "10.0.1.4:11211"}, + {"837402", "10.0.1.2:11211"}, + {"837635", "10.0.1.6:11211"}, + {"837868", "10.0.1.8:11211"}, + {"838101", "10.0.1.2:11211"}, + {"838334", "10.0.1.4:11211"}, + {"838567", "10.0.1.4:11211"}, + {"838800", "10.0.1.7:11211"}, + {"839033", "10.0.1.4:11211"}, + {"839266", "10.0.1.1:11211"}, + {"839499", "10.0.1.5:11211"}, + {"839732", 
"10.0.1.3:11211"}, + {"839965", "10.0.1.7:11211"}, + {"840198", "10.0.1.7:11211"}, + {"840431", "10.0.1.7:11211"}, + {"840664", "10.0.1.6:11211"}, + {"840897", "10.0.1.1:11211"}, + {"841130", "10.0.1.7:11211"}, + {"841363", "10.0.1.2:11211"}, + {"841596", "10.0.1.6:11211"}, + {"841829", "10.0.1.7:11211"}, + {"842062", "10.0.1.4:11211"}, + {"842295", "10.0.1.7:11211"}, + {"842528", "10.0.1.7:11211"}, + {"842761", "10.0.1.2:11211"}, + {"842994", "10.0.1.7:11211"}, + {"843227", "10.0.1.7:11211"}, + {"843460", "10.0.1.5:11211"}, + {"843693", "10.0.1.1:11211"}, + {"843926", "10.0.1.5:11211"}, + {"844159", "10.0.1.1:11211"}, + {"844392", "10.0.1.6:11211"}, + {"844625", "10.0.1.1:11211"}, + {"844858", "10.0.1.5:11211"}, + {"845091", "10.0.1.8:11211"}, + {"845324", "10.0.1.6:11211"}, + {"845557", "10.0.1.8:11211"}, + {"845790", "10.0.1.4:11211"}, + {"846023", "10.0.1.1:11211"}, + {"846256", "10.0.1.2:11211"}, + {"846489", "10.0.1.8:11211"}, + {"846722", "10.0.1.4:11211"}, + {"846955", "10.0.1.2:11211"}, + {"847188", "10.0.1.6:11211"}, + {"847421", "10.0.1.1:11211"}, + {"847654", "10.0.1.2:11211"}, + {"847887", "10.0.1.1:11211"}, + {"848120", "10.0.1.8:11211"}, + {"848353", "10.0.1.6:11211"}, + {"848586", "10.0.1.6:11211"}, + {"848819", "10.0.1.2:11211"}, + {"849052", "10.0.1.3:11211"}, + {"849285", "10.0.1.1:11211"}, + {"849518", "10.0.1.5:11211"}, + {"849751", "10.0.1.3:11211"}, + {"849984", "10.0.1.6:11211"}, + {"850217", "10.0.1.2:11211"}, + {"855576", "10.0.1.8:11211"}, + {"855809", "10.0.1.2:11211"}, + {"856042", "10.0.1.7:11211"}, + {"856275", "10.0.1.6:11211"}, + {"856508", "10.0.1.7:11211"}, + {"856741", "10.0.1.3:11211"}, + {"856974", "10.0.1.2:11211"}, + {"857207", "10.0.1.4:11211"}, + {"857440", "10.0.1.7:11211"}, + {"857673", "10.0.1.5:11211"}, + {"857906", "10.0.1.8:11211"}, + {"858139", "10.0.1.3:11211"}, + {"858372", "10.0.1.1:11211"}, + {"858605", "10.0.1.1:11211"}, + {"858838", "10.0.1.6:11211"}, + {"859071", "10.0.1.5:11211"}, + {"859304", 
"10.0.1.3:11211"}, + {"859537", "10.0.1.2:11211"}, + {"859770", "10.0.1.8:11211"}, + {"860003", "10.0.1.8:11211"}, + {"860236", "10.0.1.4:11211"}, + {"860469", "10.0.1.2:11211"}, + {"860702", "10.0.1.7:11211"}, + {"860935", "10.0.1.6:11211"}, + {"861168", "10.0.1.4:11211"}, + {"861401", "10.0.1.2:11211"}, + {"861634", "10.0.1.5:11211"}, + {"861867", "10.0.1.5:11211"}, + {"862100", "10.0.1.1:11211"}, + {"862333", "10.0.1.8:11211"}, + {"862566", "10.0.1.4:11211"}, + {"862799", "10.0.1.1:11211"}, + {"863032", "10.0.1.4:11211"}, + {"863265", "10.0.1.3:11211"}, + {"863498", "10.0.1.6:11211"}, + {"863731", "10.0.1.8:11211"}, + {"863964", "10.0.1.6:11211"}, + {"864197", "10.0.1.5:11211"}, + {"864430", "10.0.1.6:11211"}, + {"864663", "10.0.1.1:11211"}, + {"864896", "10.0.1.6:11211"}, + {"865129", "10.0.1.3:11211"}, + {"865362", "10.0.1.8:11211"}, + {"865595", "10.0.1.4:11211"}, + {"865828", "10.0.1.8:11211"}, + {"866061", "10.0.1.5:11211"}, + {"866294", "10.0.1.3:11211"}, + {"866527", "10.0.1.5:11211"}, + {"866760", "10.0.1.3:11211"}, + {"866993", "10.0.1.2:11211"}, + {"867226", "10.0.1.1:11211"}, + {"867459", "10.0.1.1:11211"}, + {"867692", "10.0.1.6:11211"}, + {"867925", "10.0.1.6:11211"}, + {"868158", "10.0.1.4:11211"}, + {"868391", "10.0.1.8:11211"}, + {"868624", "10.0.1.3:11211"}, + {"868857", "10.0.1.5:11211"}, + {"869090", "10.0.1.4:11211"}, + {"869323", "10.0.1.7:11211"}, + {"869556", "10.0.1.1:11211"}, + {"869789", "10.0.1.2:11211"}, + {"870022", "10.0.1.1:11211"}, + {"870255", "10.0.1.4:11211"}, + {"870488", "10.0.1.7:11211"}, + {"870721", "10.0.1.1:11211"}, + {"870954", "10.0.1.7:11211"}, + {"871187", "10.0.1.1:11211"}, + {"871420", "10.0.1.4:11211"}, + {"871653", "10.0.1.4:11211"}, + {"871886", "10.0.1.8:11211"}, + {"872119", "10.0.1.6:11211"}, + {"872352", "10.0.1.6:11211"}, + {"872585", "10.0.1.1:11211"}, + {"872818", "10.0.1.3:11211"}, + {"873051", "10.0.1.4:11211"}, + {"873284", "10.0.1.8:11211"}, + {"873517", "10.0.1.3:11211"}, + {"873750", 
"10.0.1.8:11211"}, + {"877711", "10.0.1.6:11211"}, + {"877944", "10.0.1.6:11211"}, + {"878177", "10.0.1.6:11211"}, + {"878410", "10.0.1.3:11211"}, + {"878643", "10.0.1.6:11211"}, + {"878876", "10.0.1.5:11211"}, + {"879109", "10.0.1.4:11211"}, + {"879342", "10.0.1.7:11211"}, + {"879575", "10.0.1.6:11211"}, + {"879808", "10.0.1.1:11211"}, + {"880041", "10.0.1.7:11211"}, + {"880274", "10.0.1.8:11211"}, + {"880507", "10.0.1.2:11211"}, + {"880740", "10.0.1.7:11211"}, + {"880973", "10.0.1.5:11211"}, + {"881206", "10.0.1.8:11211"}, + {"881439", "10.0.1.6:11211"}, + {"881672", "10.0.1.6:11211"}, + {"881905", "10.0.1.2:11211"}, + {"882138", "10.0.1.8:11211"}, + {"882371", "10.0.1.3:11211"}, + {"882604", "10.0.1.1:11211"}, + {"882837", "10.0.1.2:11211"}, + {"883070", "10.0.1.8:11211"}, + {"883303", "10.0.1.3:11211"}, + {"883536", "10.0.1.1:11211"}, + {"883769", "10.0.1.5:11211"}, + {"884002", "10.0.1.2:11211"}, + {"884235", "10.0.1.5:11211"}, + {"884468", "10.0.1.4:11211"}, + {"884701", "10.0.1.3:11211"}, + {"884934", "10.0.1.7:11211"}, + {"885167", "10.0.1.4:11211"}, + {"885400", "10.0.1.7:11211"}, + {"885633", "10.0.1.8:11211"}, + {"885866", "10.0.1.7:11211"}, + {"886099", "10.0.1.4:11211"}, + {"886332", "10.0.1.4:11211"}, + {"886565", "10.0.1.7:11211"}, + {"886798", "10.0.1.3:11211"}, + {"887031", "10.0.1.3:11211"}, + {"887264", "10.0.1.6:11211"}, + {"887497", "10.0.1.7:11211"}, + {"887730", "10.0.1.7:11211"}, + {"887963", "10.0.1.7:11211"}, + {"888196", "10.0.1.3:11211"}, + {"888429", "10.0.1.8:11211"}, + {"888662", "10.0.1.5:11211"}, + {"888895", "10.0.1.4:11211"}, + {"889128", "10.0.1.3:11211"}, + {"889361", "10.0.1.3:11211"}, + {"889594", "10.0.1.4:11211"}, + {"889827", "10.0.1.5:11211"}, + {"890060", "10.0.1.7:11211"}, + {"890293", "10.0.1.3:11211"}, + {"890526", "10.0.1.1:11211"}, + {"890759", "10.0.1.3:11211"}, + {"890992", "10.0.1.3:11211"}, + {"891225", "10.0.1.1:11211"}, + {"891458", "10.0.1.6:11211"}, + {"891691", "10.0.1.6:11211"}, + {"891924", 
"10.0.1.2:11211"}, + {"892157", "10.0.1.4:11211"}, + {"892390", "10.0.1.4:11211"}, + {"892623", "10.0.1.8:11211"}, + {"892856", "10.0.1.1:11211"}, + {"893089", "10.0.1.7:11211"}, + {"893322", "10.0.1.8:11211"}, + {"893555", "10.0.1.5:11211"}, + {"893788", "10.0.1.6:11211"}, + {"894021", "10.0.1.8:11211"}, + {"894254", "10.0.1.6:11211"}, + {"894487", "10.0.1.4:11211"}, + {"894720", "10.0.1.6:11211"}, + {"894953", "10.0.1.6:11211"}, + {"895186", "10.0.1.3:11211"}, + {"895419", "10.0.1.1:11211"}, + {"895652", "10.0.1.8:11211"}, + {"895885", "10.0.1.2:11211"}, + {"896118", "10.0.1.7:11211"}, + {"896351", "10.0.1.3:11211"}, + {"896584", "10.0.1.2:11211"}, + {"896817", "10.0.1.1:11211"}, + {"897050", "10.0.1.2:11211"}, + {"897283", "10.0.1.2:11211"}, + {"897516", "10.0.1.4:11211"}, + {"897749", "10.0.1.5:11211"}, + {"897982", "10.0.1.6:11211"}, + {"898215", "10.0.1.5:11211"}, + {"898448", "10.0.1.7:11211"}, + {"898681", "10.0.1.2:11211"}, + {"898914", "10.0.1.2:11211"}, + {"899147", "10.0.1.4:11211"}, + {"899380", "10.0.1.5:11211"}, + {"899613", "10.0.1.1:11211"}, + {"899846", "10.0.1.2:11211"}, + {"900079", "10.0.1.3:11211"}, + {"900312", "10.0.1.1:11211"}, + {"900545", "10.0.1.6:11211"}, + {"900778", "10.0.1.6:11211"}, + {"901011", "10.0.1.2:11211"}, + {"901244", "10.0.1.7:11211"}, + {"901477", "10.0.1.6:11211"}, + {"901710", "10.0.1.2:11211"}, + {"901943", "10.0.1.8:11211"}, + {"902176", "10.0.1.6:11211"}, + {"902409", "10.0.1.7:11211"}, + {"902642", "10.0.1.4:11211"}, + {"902875", "10.0.1.5:11211"}, + {"903108", "10.0.1.6:11211"}, + {"907535", "10.0.1.1:11211"}, + {"907768", "10.0.1.3:11211"}, + {"908001", "10.0.1.6:11211"}, + {"908234", "10.0.1.5:11211"}, + {"908467", "10.0.1.2:11211"}, + {"908700", "10.0.1.8:11211"}, + {"908933", "10.0.1.8:11211"}, + {"909166", "10.0.1.2:11211"}, + {"909399", "10.0.1.2:11211"}, + {"909632", "10.0.1.7:11211"}, + {"909865", "10.0.1.3:11211"}, + {"910098", "10.0.1.2:11211"}, + {"910331", "10.0.1.6:11211"}, + {"910564", 
"10.0.1.2:11211"}, + {"910797", "10.0.1.5:11211"}, + {"911030", "10.0.1.8:11211"}, + {"911263", "10.0.1.7:11211"}, + {"911496", "10.0.1.2:11211"}, + {"911729", "10.0.1.2:11211"}, + {"911962", "10.0.1.1:11211"}, + {"912195", "10.0.1.5:11211"}, + {"912428", "10.0.1.8:11211"}, + {"912661", "10.0.1.8:11211"}, + {"912894", "10.0.1.1:11211"}, + {"913127", "10.0.1.8:11211"}, + {"913360", "10.0.1.7:11211"}, + {"913593", "10.0.1.8:11211"}, + {"913826", "10.0.1.1:11211"}, + {"914059", "10.0.1.2:11211"}, + {"914292", "10.0.1.8:11211"}, + {"914525", "10.0.1.5:11211"}, + {"914758", "10.0.1.1:11211"}, + {"914991", "10.0.1.4:11211"}, + {"915224", "10.0.1.7:11211"}, + {"915457", "10.0.1.1:11211"}, + {"915690", "10.0.1.2:11211"}, + {"915923", "10.0.1.1:11211"}, + {"916156", "10.0.1.8:11211"}, + {"916389", "10.0.1.6:11211"}, + {"916622", "10.0.1.8:11211"}, + {"916855", "10.0.1.5:11211"}, + {"917088", "10.0.1.6:11211"}, + {"917321", "10.0.1.2:11211"}, + {"917554", "10.0.1.8:11211"}, + {"917787", "10.0.1.3:11211"}, + {"918020", "10.0.1.3:11211"}, + {"918253", "10.0.1.1:11211"}, + {"918486", "10.0.1.1:11211"}, + {"918719", "10.0.1.5:11211"}, + {"918952", "10.0.1.1:11211"}, + {"919185", "10.0.1.2:11211"}, + {"919418", "10.0.1.3:11211"}, + {"919651", "10.0.1.6:11211"}, + {"919884", "10.0.1.7:11211"}, + {"920117", "10.0.1.8:11211"}, + {"920350", "10.0.1.4:11211"}, + {"920583", "10.0.1.7:11211"}, + {"920816", "10.0.1.2:11211"}, + {"921049", "10.0.1.6:11211"}, + {"921282", "10.0.1.6:11211"}, + {"921515", "10.0.1.2:11211"}, + {"921748", "10.0.1.2:11211"}, + {"921981", "10.0.1.1:11211"}, + {"922214", "10.0.1.7:11211"}, + {"922447", "10.0.1.5:11211"}, + {"922680", "10.0.1.7:11211"}, + {"922913", "10.0.1.6:11211"}, + {"923146", "10.0.1.3:11211"}, + {"923379", "10.0.1.1:11211"}, + {"923612", "10.0.1.5:11211"}, + {"923845", "10.0.1.7:11211"}, + {"924078", "10.0.1.4:11211"}, + {"924311", "10.0.1.7:11211"}, + {"924544", "10.0.1.2:11211"}, + {"924777", "10.0.1.7:11211"}, + {"925010", 
"10.0.1.8:11211"}, + {"925243", "10.0.1.8:11211"}, + {"925476", "10.0.1.7:11211"}, + {"925709", "10.0.1.8:11211"}, + {"925942", "10.0.1.6:11211"}, + {"926175", "10.0.1.4:11211"}, + {"926408", "10.0.1.3:11211"}, + {"926641", "10.0.1.2:11211"}, + {"926874", "10.0.1.5:11211"}, + {"927107", "10.0.1.3:11211"}, + {"927340", "10.0.1.1:11211"}, + {"927573", "10.0.1.3:11211"}, + {"927806", "10.0.1.3:11211"}, + {"932699", "10.0.1.4:11211"}, + {"932932", "10.0.1.7:11211"}, + {"933165", "10.0.1.3:11211"}, + {"933398", "10.0.1.8:11211"}, + {"933631", "10.0.1.5:11211"}, + {"933864", "10.0.1.6:11211"}, + {"934097", "10.0.1.8:11211"}, + {"934330", "10.0.1.5:11211"}, + {"934563", "10.0.1.1:11211"}, + {"934796", "10.0.1.4:11211"}, + {"935029", "10.0.1.2:11211"}, + {"935262", "10.0.1.6:11211"}, + {"935495", "10.0.1.3:11211"}, + {"935728", "10.0.1.6:11211"}, + {"935961", "10.0.1.3:11211"}, + {"936194", "10.0.1.6:11211"}, + {"936427", "10.0.1.6:11211"}, + {"936660", "10.0.1.5:11211"}, + {"936893", "10.0.1.7:11211"}, + {"937126", "10.0.1.8:11211"}, + {"937359", "10.0.1.6:11211"}, + {"937592", "10.0.1.7:11211"}, + {"937825", "10.0.1.3:11211"}, + {"938058", "10.0.1.7:11211"}, + {"938291", "10.0.1.7:11211"}, + {"938524", "10.0.1.2:11211"}, + {"938757", "10.0.1.6:11211"}, + {"938990", "10.0.1.2:11211"}, + {"939223", "10.0.1.7:11211"}, + {"939456", "10.0.1.4:11211"}, + {"939689", "10.0.1.1:11211"}, + {"939922", "10.0.1.5:11211"}, + {"940155", "10.0.1.2:11211"}, + {"940388", "10.0.1.1:11211"}, + {"940621", "10.0.1.4:11211"}, + {"940854", "10.0.1.3:11211"}, + {"941087", "10.0.1.4:11211"}, + {"944815", "10.0.1.4:11211"}, + {"945048", "10.0.1.1:11211"}, + {"945281", "10.0.1.2:11211"}, + {"945514", "10.0.1.4:11211"}, + {"945747", "10.0.1.5:11211"}, + {"945980", "10.0.1.4:11211"}, + {"946213", "10.0.1.2:11211"}, + {"946446", "10.0.1.2:11211"}, + {"946679", "10.0.1.1:11211"}, + {"946912", "10.0.1.3:11211"}, + {"947145", "10.0.1.3:11211"}, + {"947378", "10.0.1.1:11211"}, + {"947611", 
"10.0.1.3:11211"}, + {"947844", "10.0.1.2:11211"}, + {"948077", "10.0.1.3:11211"}, + {"948310", "10.0.1.7:11211"}, + {"948543", "10.0.1.5:11211"}, + {"948776", "10.0.1.7:11211"}, + {"949009", "10.0.1.6:11211"}, + {"949242", "10.0.1.1:11211"}, + {"949475", "10.0.1.4:11211"}, + {"949708", "10.0.1.2:11211"}, + {"949941", "10.0.1.5:11211"}, + {"950174", "10.0.1.1:11211"}, + {"950407", "10.0.1.4:11211"}, + {"950640", "10.0.1.7:11211"}, + {"950873", "10.0.1.3:11211"}, + {"951106", "10.0.1.3:11211"}, + {"951339", "10.0.1.1:11211"}, + {"951572", "10.0.1.7:11211"}, + {"951805", "10.0.1.7:11211"}, + {"952038", "10.0.1.1:11211"}, + {"952271", "10.0.1.8:11211"}, + {"952504", "10.0.1.8:11211"}, + {"952737", "10.0.1.7:11211"}, + {"952970", "10.0.1.1:11211"}, + {"953203", "10.0.1.2:11211"}, + {"953436", "10.0.1.5:11211"}, + {"953669", "10.0.1.2:11211"}, + {"953902", "10.0.1.4:11211"}, + {"954135", "10.0.1.1:11211"}, + {"954368", "10.0.1.6:11211"}, + {"954601", "10.0.1.7:11211"}, + {"954834", "10.0.1.3:11211"}, + {"955067", "10.0.1.6:11211"}, + {"955300", "10.0.1.7:11211"}, + {"955533", "10.0.1.3:11211"}, + {"955766", "10.0.1.1:11211"}, + {"955999", "10.0.1.8:11211"}, + {"956232", "10.0.1.3:11211"}, + {"956465", "10.0.1.4:11211"}, + {"956698", "10.0.1.5:11211"}, + {"956931", "10.0.1.2:11211"}, + {"957164", "10.0.1.5:11211"}, + {"957397", "10.0.1.6:11211"}, + {"957630", "10.0.1.7:11211"}, + {"957863", "10.0.1.7:11211"}, + {"958096", "10.0.1.8:11211"}, + {"958329", "10.0.1.4:11211"}, + {"958562", "10.0.1.8:11211"}, + {"958795", "10.0.1.4:11211"}, + {"959028", "10.0.1.6:11211"}, + {"959261", "10.0.1.5:11211"}, + {"959494", "10.0.1.1:11211"}, + {"959727", "10.0.1.6:11211"}, + {"959960", "10.0.1.5:11211"}, + {"960193", "10.0.1.4:11211"}, + {"960426", "10.0.1.8:11211"}, + {"960659", "10.0.1.2:11211"}, + {"960892", "10.0.1.8:11211"}, + {"961125", "10.0.1.8:11211"}, + {"961358", "10.0.1.2:11211"}, + {"961591", "10.0.1.2:11211"}, + {"961824", "10.0.1.5:11211"}, + {"962057", 
"10.0.1.1:11211"}, + {"962290", "10.0.1.3:11211"}, + {"962523", "10.0.1.6:11211"}, + {"962756", "10.0.1.6:11211"}, + {"962989", "10.0.1.6:11211"}, + {"963222", "10.0.1.4:11211"}, + {"963455", "10.0.1.2:11211"}, + {"963688", "10.0.1.1:11211"}, + {"963921", "10.0.1.6:11211"}, + {"964154", "10.0.1.3:11211"}, + {"964387", "10.0.1.1:11211"}, + {"964620", "10.0.1.7:11211"}, + {"964853", "10.0.1.2:11211"}, + {"965086", "10.0.1.5:11211"}, + {"965319", "10.0.1.5:11211"}, + {"965552", "10.0.1.7:11211"}, + {"965785", "10.0.1.7:11211"}, + {"966018", "10.0.1.3:11211"}, + {"966251", "10.0.1.6:11211"}, + {"966484", "10.0.1.8:11211"}, + {"966717", "10.0.1.7:11211"}, + {"966950", "10.0.1.6:11211"}, + {"967183", "10.0.1.3:11211"}, + {"967416", "10.0.1.1:11211"}, + {"967649", "10.0.1.2:11211"}, + {"967882", "10.0.1.8:11211"}, + {"968115", "10.0.1.7:11211"}, + {"968348", "10.0.1.3:11211"}, + {"968581", "10.0.1.4:11211"}, + {"968814", "10.0.1.4:11211"}, + {"969047", "10.0.1.3:11211"}, + {"969280", "10.0.1.7:11211"}, + {"969513", "10.0.1.6:11211"}, + {"969746", "10.0.1.1:11211"}, + {"969979", "10.0.1.4:11211"}, + {"970212", "10.0.1.1:11211"}, + {"970445", "10.0.1.6:11211"}, + {"970678", "10.0.1.1:11211"}, + {"970911", "10.0.1.3:11211"}, + {"971144", "10.0.1.6:11211"}, + {"971377", "10.0.1.1:11211"}, + {"971610", "10.0.1.1:11211"}, + {"971843", "10.0.1.4:11211"}, + {"972076", "10.0.1.4:11211"}, + {"972309", "10.0.1.3:11211"}, + {"976037", "10.0.1.1:11211"}, + {"976270", "10.0.1.2:11211"}, + {"976503", "10.0.1.6:11211"}, + {"976736", "10.0.1.7:11211"}, + {"976969", "10.0.1.7:11211"}, + {"977202", "10.0.1.7:11211"}, + {"977435", "10.0.1.6:11211"}, + {"977668", "10.0.1.6:11211"}, + {"977901", "10.0.1.2:11211"}, + {"978134", "10.0.1.5:11211"}, + {"978367", "10.0.1.6:11211"}, + {"978600", "10.0.1.5:11211"}, + {"978833", "10.0.1.1:11211"}, + {"979066", "10.0.1.5:11211"}, + {"979299", "10.0.1.5:11211"}, + {"979532", "10.0.1.3:11211"}, + {"979765", "10.0.1.4:11211"}, + {"979998", 
"10.0.1.8:11211"}, + {"980231", "10.0.1.3:11211"}, + {"980464", "10.0.1.8:11211"}, + {"980697", "10.0.1.1:11211"}, + {"980930", "10.0.1.2:11211"}, + {"981163", "10.0.1.5:11211"}, + {"987454", "10.0.1.2:11211"}, + {"987687", "10.0.1.2:11211"}, + {"987920", "10.0.1.7:11211"}, + {"988153", "10.0.1.1:11211"}, + {"988386", "10.0.1.1:11211"}, + {"988619", "10.0.1.6:11211"}, + {"988852", "10.0.1.3:11211"}, + {"989085", "10.0.1.5:11211"}, + {"989318", "10.0.1.7:11211"}, + {"989551", "10.0.1.5:11211"}, + {"989784", "10.0.1.8:11211"}, + {"990017", "10.0.1.1:11211"}, + {"990250", "10.0.1.5:11211"}, + {"990483", "10.0.1.8:11211"}, + {"990716", "10.0.1.5:11211"}, + {"990949", "10.0.1.4:11211"}, + {"991182", "10.0.1.8:11211"}, + {"991415", "10.0.1.3:11211"}, + {"991648", "10.0.1.4:11211"}, + {"991881", "10.0.1.5:11211"}, + {"992114", "10.0.1.5:11211"}, + {"992347", "10.0.1.3:11211"}, + {"992580", "10.0.1.7:11211"}, + {"992813", "10.0.1.4:11211"}, + {"993046", "10.0.1.6:11211"}, + {"993279", "10.0.1.3:11211"}, + {"993512", "10.0.1.5:11211"}, + {"993745", "10.0.1.4:11211"}, + {"993978", "10.0.1.7:11211"}, + {"994211", "10.0.1.7:11211"}, + {"994444", "10.0.1.5:11211"}, + {"994677", "10.0.1.1:11211"}, + {"994910", "10.0.1.7:11211"}, + {"995143", "10.0.1.7:11211"}, + {"995376", "10.0.1.4:11211"}, + {"995609", "10.0.1.1:11211"}, + {"995842", "10.0.1.6:11211"}, + {"996075", "10.0.1.6:11211"}, + {"996308", "10.0.1.6:11211"}, + {"996541", "10.0.1.2:11211"}, + {"996774", "10.0.1.6:11211"}, + {"997007", "10.0.1.7:11211"}, + {"997240", "10.0.1.2:11211"}, + {"997473", "10.0.1.1:11211"}, + {"997706", "10.0.1.4:11211"}, + {"999104", "10.0.1.8:11211"}, + {"999337", "10.0.1.4:11211"}, + {"999570", "10.0.1.6:11211"}, + {"999803", "10.0.1.4:11211"} + }; + + for (String[] s : exp) { + String k = s[0]; + String server = s[1]; + MemcachedNode n=locator.getPrimary(k); + assertEquals("/" + server, n.getSocketAddress().toString()); + } + + } +} diff --git 
a/src/test/java/net/spy/memcached/LongClientTest.java b/src/test/java/net/spy/memcached/LongClientTest.java new file mode 100644 index 000000000..156196e7f --- /dev/null +++ b/src/test/java/net/spy/memcached/LongClientTest.java @@ -0,0 +1,70 @@ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.compat.SyncThread; + +/** + * Longer running test case. + */ +public class LongClientTest extends ClientBaseCase { + + public void testParallelGet() throws Throwable { + // Get a connection with the get optimization disabled. + client.shutdown(); + initClient(new DefaultConnectionFactory(){ + @Override + public MemcachedConnection createConnection( + List addrs) throws IOException { + MemcachedConnection rv = super.createConnection(addrs); + return rv; + } + @Override + public long getOperationTimeout() { + return 15000; + } + @Override + public boolean shouldOptimize() { + return false; + } + }); + + // Throw in some seed data. + byte data[]=new byte[32768]; + Random r=new Random(); + r.nextBytes(data); + final int hashcode=Arrays.hashCode(data); + final Collection keys=new ArrayList(); + for(int i=0; i<50; i++) { + client.set("k" + i, 60, data); + keys.add("k" + i); + } + + // Make sure it got in. 
+ client.waitForQueues(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + + int cnt=SyncThread.getDistinctResultCount(25, new Callable(){ + public Integer call() throws Exception { + for(int i=0; i<25; i++) { + Map m = client.getBulk(keys); + for(String s : keys) { + byte b[]=(byte[])m.get(s); + assert Arrays.hashCode(b) == hashcode + : "Expected " + hashcode + " was " + + Arrays.hashCode(b); + } + } + return hashcode; + }}); + assertEquals(cnt, 25); + } +} diff --git a/src/test/java/net/spy/memcached/MemcachedClientConstructorTest.java b/src/test/java/net/spy/memcached/MemcachedClientConstructorTest.java new file mode 100644 index 000000000..6724ecaf3 --- /dev/null +++ b/src/test/java/net/spy/memcached/MemcachedClientConstructorTest.java @@ -0,0 +1,167 @@ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import junit.framework.TestCase; + +/** + * Test the various memcached client constructors. + */ +public class MemcachedClientConstructorTest extends TestCase { + + private MemcachedClient client=null; + + @Override + protected void tearDown() throws Exception { + if(client != null) { + try { + client.shutdown(); + } catch(NullPointerException e) { + // This is a workaround for a disagreement betweewn how things + // should work in eclipse and buildr. My plan is to upgrade to + // junit4 all around and write some tests that are a bit easier + // to follow. + + // The actual problem here is a client that isn't properly + // initialized is attempting to be shut down. 
+ } + } + super.tearDown(); + } + + private void assertWorking() throws Exception { + Map versions = client.getVersions(); + assertEquals("/127.0.0.1:11211", + versions.keySet().iterator().next().toString()); + } + + private void assertArgRequired(IllegalArgumentException e) { + assertEquals("You must have at least one server to connect to", + e.getMessage()); + } + + public void testVarargConstructor() throws Exception { + client = new MemcachedClient( + new InetSocketAddress(InetAddress.getByName("127.0.0.1"), 11211)); + assertWorking(); + } + + public void testEmptyVarargConstructor() throws Exception { + try { + client = new MemcachedClient(); + fail("Expected illegal arg exception, got " + client); + } catch(IllegalArgumentException e) { + assertArgRequired(e); + } + } + + public void testNulListConstructor() throws Exception { + try { + List l=null; + client = new MemcachedClient(l); + fail("Expected null pointer exception, got " + client); + } catch(NullPointerException e) { + assertEquals("Server list required", e.getMessage()); + } + } + + public void testEmptyListConstructor() throws Exception { + try { + client = new MemcachedClient( + Collections.emptyList()); + fail("Expected illegal arg exception, got " + client); + } catch(IllegalArgumentException e) { + assertArgRequired(e); + } + } + + public void testNullFactoryConstructor() throws Exception { + try { + client = new MemcachedClient(null, + AddrUtil.getAddresses("127.0.0.1:11211")); + fail("Expected null pointer exception, got " + client); + } catch(NullPointerException e) { + assertEquals("Connection factory required", e.getMessage()); + } + } + + public void testNegativeTimeout() throws Exception { + try { + client = new MemcachedClient(new DefaultConnectionFactory() { + @Override + public long getOperationTimeout() { + return -1; + }}, + AddrUtil.getAddresses("127.0.0.1:11211")); + fail("Expected null pointer exception, got " + client); + } catch(IllegalArgumentException e) { + 
assertEquals("Operation timeout must be positive.", e.getMessage()); + } + } + + public void testZeroTimeout() throws Exception { + try { + client = new MemcachedClient(new DefaultConnectionFactory() { + @Override + public long getOperationTimeout() { + return 0; + }}, + AddrUtil.getAddresses("127.0.0.1:11211")); + fail("Expected null pointer exception, got " + client); + } catch(IllegalArgumentException e) { + assertEquals("Operation timeout must be positive.", e.getMessage()); + } + } + + public void testConnFactoryWithoutOpFactory() throws Exception { + try { + client = new MemcachedClient(new DefaultConnectionFactory(){ + @Override + public OperationFactory getOperationFactory() { + return null; + } + }, AddrUtil.getAddresses("127.0.0.1:11211")); + } catch(AssertionError e) { + assertEquals("Connection factory failed to make op factory", + e.getMessage()); + } + } + + public void testConnFactoryWithoutConns() throws Exception { + try { + client = new MemcachedClient(new DefaultConnectionFactory(){ + @Override + public MemcachedConnection createConnection( + List addrs) throws IOException { + return null; + } + }, AddrUtil.getAddresses("127.0.0.1:11211")); + } catch(AssertionError e) { + assertEquals("Connection factory failed to make a connection", + e.getMessage()); + } + + } + + public void testArraymodNodeLocatorAccessor() throws Exception { + client = new MemcachedClient(AddrUtil.getAddresses("127.0.0.1:11211")); + assertTrue(client.getNodeLocator() instanceof ArrayModNodeLocator); + assertTrue(client.getNodeLocator().getPrimary("x") + instanceof MemcachedNodeROImpl); + } + + public void testKetamaNodeLocatorAccessor() throws Exception { + client = new MemcachedClient(new KetamaConnectionFactory(), + AddrUtil.getAddresses("127.0.0.1:11211")); + assertTrue(client.getNodeLocator() instanceof KetamaNodeLocator); + assertTrue(client.getNodeLocator().getPrimary("x") + instanceof MemcachedNodeROImpl); + } + +} diff --git 
a/src/test/java/net/spy/memcached/MemcachedConnectionTest.java b/src/test/java/net/spy/memcached/MemcachedConnectionTest.java new file mode 100644 index 000000000..2f477ca43 --- /dev/null +++ b/src/test/java/net/spy/memcached/MemcachedConnectionTest.java @@ -0,0 +1,134 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +import junit.framework.TestCase; + +/** + * Test stuff that can be tested within a MemcachedConnection separately. 
+ */ +public class MemcachedConnectionTest extends TestCase { + + private MemcachedConnection conn; + private ArcusKetamaNodeLocator locator; + + @Override + protected void setUp() throws Exception { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ConnectionFactory cf = cfb.build(); + List addrs = new ArrayList(); + + conn = new MemcachedConnection(1024, cf, addrs, cf.getInitialObservers(), cf.getFailureMode(), cf.getOperationFactory()); + locator = (ArcusKetamaNodeLocator) conn.getLocator(); + } + + @Override + protected void tearDown() throws Exception { + conn.shutdown(); + } + + public void testDebugBuffer() throws Exception { + String input="this is a test _"; + ByteBuffer bb=ByteBuffer.wrap(input.getBytes()); + String s=MemcachedConnection.dbgBuffer(bb, input.length()); + assertEquals("this is a test \\x5f", s); + } + + public void testNodeManageQueue() throws Exception { + // when + conn.putMemcachedQueue("0.0.0.0:11211"); + conn.putMemcachedQueue("0.0.0.0:11211,0.0.0.0:11212,0.0.0.0:11213"); + conn.putMemcachedQueue("0.0.0.0:11212"); + + // 1st test (nodes=1) + conn.handleNodeManageQueue(); + + // then + assertTrue(1 == locator.allNodes.size()); + + // 2nd test (nodes=3) + conn.handleNodeManageQueue(); + + // then + assertTrue(3 == locator.allNodes.size()); + + // 3rd test (nodes=1) + conn.handleNodeManageQueue(); + + // then + assertTrue(1 == locator.allNodes.size()); + } + + public void testNodeManageQueue_empty() throws Exception { + // when + // on servers in the queue + + // test + conn.handleNodeManageQueue(); + + // then + assertTrue(0 == locator.allNodes.size()); + } + + public void testNodeManageQueue_invalid_addr() throws Exception { + try { + // when : putting an invalid address + conn.putMemcachedQueue(""); + + // test + conn.handleNodeManageQueue(); + + // should not be here! 
+ //fail(); + } catch (Exception e) { + e.printStackTrace(); + assertEquals("No hosts in list: ``''", e.getMessage()); + } + } + + public void testNodeManageQueue_redundent() throws Exception { + // when + conn.putMemcachedQueue("0.0.0.0:11211,0.0.0.0:11211"); + + // test + conn.handleNodeManageQueue(); + + // then + assertTrue(2 == locator.allNodes.size()); + } + + public void testNodeManageQueue_twice() throws Exception { + // when + conn.putMemcachedQueue("0.0.0.0:11211"); + conn.putMemcachedQueue("0.0.0.0:11211"); + + // test + conn.handleNodeManageQueue(); + + // then + assertTrue(1 == locator.allNodes.size()); + } + + public void testAddOperations() throws Exception { + + } +} diff --git a/src/test/java/net/spy/memcached/MemcachedNodeROImplTest.java b/src/test/java/net/spy/memcached/MemcachedNodeROImplTest.java new file mode 100644 index 000000000..a246785fd --- /dev/null +++ b/src/test/java/net/spy/memcached/MemcachedNodeROImplTest.java @@ -0,0 +1,63 @@ +package net.spy.memcached; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import org.jmock.Mock; +import org.jmock.MockObjectTestCase; + +/** + * Test readonliness of the MemcachedNodeROImpl + */ +public class MemcachedNodeROImplTest extends MockObjectTestCase { + + public void testReadOnliness() throws Exception { + SocketAddress sa=new InetSocketAddress(11211); + Mock m = mock(MemcachedNode.class, "node"); + MemcachedNodeROImpl node= + new MemcachedNodeROImpl((MemcachedNode)m.proxy()); + m.expects(once()).method("getSocketAddress").will(returnValue(sa)); + + assertSame(sa, node.getSocketAddress()); + assertEquals(m.proxy().toString(), node.toString()); + + Set acceptable = new HashSet(Arrays.asList( + "toString", "getSocketAddress", "getBytesRemainingToWrite", + "getReconnectCount", "getSelectionOps", "hasReadOp", + 
"hasWriteOp", "isActive")); + + for(Method meth : MemcachedNode.class.getMethods()) { + if(acceptable.contains(meth.getName())) { + // ok + } else { + Object[] args=new Object[meth.getParameterTypes().length]; + fillArgs(meth.getParameterTypes(), args); + try { + meth.invoke(node, args); + fail("Failed to break on " + meth.getName()); + } catch(InvocationTargetException e) { + assertSame("Fail at " + meth.getName(), + UnsupportedOperationException.class, + e.getCause().getClass()); + } + } + } + } + + private void fillArgs(Class[] parameterTypes, Object[] args) { + int i=0; + for(Class c : parameterTypes) { + if(c == Boolean.TYPE) { + args[i++] = false; + } else { + args[i++] = null; + } + } + } + +} diff --git a/src/test/java/net/spy/memcached/MockMemcachedNode.java b/src/test/java/net/spy/memcached/MockMemcachedNode.java new file mode 100644 index 000000000..3ff24fb5c --- /dev/null +++ b/src/test/java/net/spy/memcached/MockMemcachedNode.java @@ -0,0 +1,149 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * + */ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.nio.channels.SelectionKey; +import java.nio.channels.SocketChannel; +import java.util.Collection; + +import net.spy.memcached.ops.Operation; + +public class MockMemcachedNode implements MemcachedNode { + private final InetSocketAddress socketAddress; + public SocketAddress getSocketAddress() {return socketAddress;} + + public MockMemcachedNode(InetSocketAddress socketAddress) { + this.socketAddress = socketAddress; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MockMemcachedNode that = (MockMemcachedNode) o; + + if (socketAddress != null + ? !socketAddress.equals(that.socketAddress) + : that.socketAddress != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return (socketAddress != null ? 
socketAddress.hashCode() : 0); + } + + public void copyInputQueue() { + // noop + } + public void setupResend() { + // noop + } + public void fillWriteBuffer(boolean optimizeGets) { + // noop + } + public void transitionWriteItem() { + // noop + } + public Operation getCurrentReadOp() {return null;} + public Operation removeCurrentReadOp() {return null;} + public Operation getCurrentWriteOp() {return null;} + public Operation removeCurrentWriteOp() {return null;} + public boolean hasReadOp() {return false;} + public boolean hasWriteOp() {return false;} + public void addOp(Operation op) { + // noop + } + public void insertOp(Operation op) { + // noop + } + public int getSelectionOps() {return 0;} + public ByteBuffer getRbuf() {return null;} + public ByteBuffer getWbuf() {return null;} + public boolean isActive() {return false;} + public void reconnecting() { + // noop + } + public void connected() { + // noop + } + public int getReconnectCount() {return 0;} + public void registerChannel(SocketChannel ch, SelectionKey selectionKey) { + // noop + } + public void setChannel(SocketChannel to) { + // noop + } + public SocketChannel getChannel() {return null;} + public void setSk(SelectionKey to) { + // noop + } + public SelectionKey getSk() {return null;} + public int getBytesRemainingToWrite() {return 0;} + public int writeSome() throws IOException {return 0;} + public void fixupOps() { + // noop + } + + public Collection destroyInputQueue() { + return null; + } + + public void authComplete() { + // noop + } + + public void setupForAuth() { + // noop + } + + public int getContinuousTimeout() { + return 0; + } + + public void setContinuousTimeout(boolean timedOut) { + // noop + } + + public boolean isFake() { + return false; + } + + public void shutdown() throws IOException { + // noop + } + + @Override + public String getStatus() { + return "MOCK_STATE"; + } +} \ No newline at end of file diff --git a/src/test/java/net/spy/memcached/ObserverTest.java 
b/src/test/java/net/spy/memcached/ObserverTest.java new file mode 100644 index 000000000..f2e7d027c --- /dev/null +++ b/src/test/java/net/spy/memcached/ObserverTest.java @@ -0,0 +1,71 @@ +package net.spy.memcached; + +import java.net.SocketAddress; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.compat.SpyObject; + +/** + * Test observer hooks. + */ +public class ObserverTest extends ClientBaseCase { + + public void testConnectionObserver() throws Exception { + ConnectionObserver obs = new LoggingObserver(); + assertTrue("Didn't add observer.", client.addObserver(obs)); + assertTrue("Didn't remove observer.", client.removeObserver(obs)); + assertFalse("Removed observer more than once.", + client.removeObserver(obs)); + } + + public void testInitialObservers() throws Exception { + assertTrue("Couldn't shut down within five seconds", + client.shutdown(5, TimeUnit.SECONDS)); + + final CountDownLatch latch = new CountDownLatch(1); + final ConnectionObserver obs = new ConnectionObserver() { + + public void connectionEstablished(SocketAddress sa, + int reconnectCount) { + latch.countDown(); + } + + public void connectionLost(SocketAddress sa) { + assert false : "Should not see this."; + } + + }; + + // Get a new client + initClient(new DefaultConnectionFactory() { + + @Override + public Collection getInitialObservers() { + return Collections.singleton(obs); + } + + }); + + assertTrue("Didn't detect connection", + latch.await(2, TimeUnit.SECONDS)); + assertTrue("Did not install observer.", client.removeObserver(obs)); + assertFalse("Didn't clean up observer.", client.removeObserver(obs)); + } + + static class LoggingObserver extends SpyObject + implements ConnectionObserver { + public void connectionEstablished(SocketAddress sa, + int reconnectCount) { + getLogger().info("Connection established to %s (%s)", + sa, reconnectCount); + } + + public void 
connectionLost(SocketAddress sa) { + getLogger().info("Connection lost from %s", sa); + } + + } +} diff --git a/src/test/java/net/spy/memcached/OperationFactoryTestBase.java b/src/test/java/net/spy/memcached/OperationFactoryTestBase.java new file mode 100644 index 000000000..de538a048 --- /dev/null +++ b/src/test/java/net/spy/memcached/OperationFactoryTestBase.java @@ -0,0 +1,256 @@ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Iterator; +import java.util.Random; + +import net.spy.memcached.ops.CASOperation; +import net.spy.memcached.ops.ConcatenationOperation; +import net.spy.memcached.ops.ConcatenationType; +import net.spy.memcached.ops.DeleteOperation; +import net.spy.memcached.ops.GetOperation; +import net.spy.memcached.ops.GetsOperation; +import net.spy.memcached.ops.KeyedOperation; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.Mutator; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.ops.OperationCallback; +import net.spy.memcached.ops.OperationStatus; +import net.spy.memcached.ops.StoreOperation; +import net.spy.memcached.ops.StoreType; + +import org.jmock.Mock; +import org.jmock.MockObjectTestCase; + +/** + * Base class for operation factory tests. 
+ */ +public abstract class OperationFactoryTestBase extends MockObjectTestCase { + + public final static String TEST_KEY = "someKey"; + protected OperationFactory ofact = null; + protected OperationCallback genericCallback; + private byte[] testData; + + @Override + protected void setUp() throws Exception { + super.setUp(); + ofact = getOperationFactory(); + genericCallback = new OperationCallback() { + public void complete() { + fail("Unexpected invocation"); + } + public void receivedStatus(OperationStatus status) { + fail("Unexpected status: " + status); + } + }; + + testData = new byte[64]; + new Random().nextBytes(testData); + } + + /** + * Get the operation factory used by the tests. + */ + protected abstract OperationFactory getOperationFactory(); + + public void testDeleteOperationCloning() { + DeleteOperation op = ofact.delete(TEST_KEY, genericCallback); + + DeleteOperation op2 = cloneOne(DeleteOperation.class, op); + assertEquals(TEST_KEY, op2.getKeys().iterator().next()); + assertCallback(op2); + } + + public void testCASOperationCloning() { + CASOperation op = ofact.cas(StoreType.set, + "someKey", 727582, 8174, 7175, testData, genericCallback); + + CASOperation op2 = cloneOne(CASOperation.class, op); + assertKey(op2); + assertEquals(727582, op2.getCasValue()); + assertEquals(8174, op2.getFlags()); + assertEquals(7175, op2.getExpiration()); + assertBytes(op2.getBytes()); + assertCallback(op2); + } + + public void testMutatorOperationIncrCloning() { + int exp = 823862; + long def = 28775; + int by = 7735; + MutatorOperation op = ofact.mutate(Mutator.incr, TEST_KEY, by, def, + exp, genericCallback); + + MutatorOperation op2 = cloneOne(MutatorOperation.class, op); + assertKey(op2); + assertEquals(exp, op2.getExpiration()); + assertEquals(def, op2.getDefault()); + assertEquals(by, op2.getBy()); + assertSame(Mutator.incr, op2.getType()); + assertCallback(op2); + } + + public void testMutatorOperationDecrCloning() { + int exp = 823862; + long def = 28775; + 
int by = 7735; + MutatorOperation op = ofact.mutate(Mutator.decr, TEST_KEY, by, def, + exp, genericCallback); + + MutatorOperation op2 = cloneOne(MutatorOperation.class, op); + assertKey(op2); + assertEquals(exp, op2.getExpiration()); + assertEquals(def, op2.getDefault()); + assertEquals(by, op2.getBy()); + assertSame(Mutator.decr, op2.getType()); + assertCallback(op2); + } + + public void testStoreOperationAddCloning() { + int exp = 823862; + int flags = 7735; + StoreOperation op = ofact.store(StoreType.add, TEST_KEY, + flags, exp, testData, genericCallback); + + StoreOperation op2 = cloneOne(StoreOperation.class, op); + assertKey(op2); + assertEquals(exp, op2.getExpiration()); + assertEquals(flags, op2.getFlags()); + assertSame(StoreType.add, op2.getStoreType()); + assertCallback(op2); + } + + public void testStoreOperationSetCloning() { + int exp = 823862; + int flags = 7735; + StoreOperation op = ofact.store(StoreType.set, TEST_KEY, + flags, exp, testData, genericCallback); + + StoreOperation op2 = cloneOne(StoreOperation.class, op); + assertKey(op2); + assertEquals(exp, op2.getExpiration()); + assertEquals(flags, op2.getFlags()); + assertSame(StoreType.set, op2.getStoreType()); + assertCallback(op2); + } + + public void testConcatenationOperationAppendCloning() { + long casId = 82757248; + ConcatenationOperation op = ofact.cat(ConcatenationType.append, casId, + TEST_KEY, testData, genericCallback); + + ConcatenationOperation op2 = cloneOne( + ConcatenationOperation.class, op); + assertKey(op2); + assertSame(ConcatenationType.append, op2.getStoreType()); + assertCallback(op2); + } + + public void testConcatenationOperationPrependCloning() { + long casId = 82757248; + ConcatenationOperation op = ofact.cat(ConcatenationType.prepend, casId, + TEST_KEY, testData, genericCallback); + + ConcatenationOperation op2 = cloneOne( + ConcatenationOperation.class, op); + assertKey(op2); + assertSame(ConcatenationType.prepend, op2.getStoreType()); + assertCallback(op2); + } + 
+ public void testSingleGetOperationCloning() { + GetOperation.Callback callback = + (GetOperation.Callback)mock(GetOperation.Callback.class).proxy(); + GetOperation op = ofact.get(TEST_KEY, callback); + + GetOperation op2 = cloneOne(GetOperation.class, op); + assertKey(op2); + assertSame(callback, op.getCallback()); + } + + public void testSingleGetsOperationCloning() { + GetsOperation.Callback callback = + (GetsOperation.Callback)mock(GetsOperation.Callback.class).proxy(); + GetsOperation op = ofact.gets(TEST_KEY, callback); + + GetsOperation op2 = cloneOne(GetsOperation.class, op); + assertKey(op2); + assertSame(callback, op.getCallback()); + } + + // These are harder cases as they fan out. + public void testMultipleGetOperationCloning() { + Collection keys = Arrays.asList("k1", "k2", "k3"); + GetOperation.Callback callback = + (GetOperation.Callback)mock(GetOperation.Callback.class).proxy(); + GetOperation op = ofact.get(keys, callback); + + Collection ops = ofact.clone(op); + assertEquals(3, ops.size()); + + Collection mutableKeys = new ArrayList(keys); + int i = 3; + for(Operation o : ops) { + assertEquals(i, mutableKeys.size()); // Starting size + GetOperation go = (GetOperation)o; + mutableKeys.removeAll(go.getKeys()); + // Verify we matched and removed 1 + assertEquals(--i, mutableKeys.size()); + } + } + + public void testMultipleGetOperationFanout() { + Collection keys = Arrays.asList("k1", "k2", "k3"); + Mock m = mock(GetOperation.Callback.class); + OperationStatus st=new OperationStatus(true, "blah"); + m.expects(once()).method("complete"); + m.expects(once()).method("receivedStatus").with(same(st)); + m.expects(once()).method("gotData") + .with(eq("k1"), eq(1), isA(byte[].class)); + m.expects(once()).method("gotData") + .with(eq("k2"), eq(2), isA(byte[].class)); + m.expects(once()).method("gotData") + .with(eq("k3"), eq(3), isA(byte[].class)); + + GetOperation.Callback callback = (GetOperation.Callback)m.proxy(); + GetOperation op = ofact.get(keys, 
callback); + + // Transition each operation callback into the complete state. + Iterator ki = keys.iterator(); + int i=0; + for(Operation o : ofact.clone(op)) { + GetOperation.Callback cb = (GetOperation.Callback)o.getCallback(); + cb.gotData(ki.next(), ++i, new byte[3]); + cb.receivedStatus(st); + cb.complete(); + } + } + + protected void assertKey(KeyedOperation op) { + assertEquals(TEST_KEY, op.getKeys().iterator().next()); + } + + protected void assertCallback(Operation op) { + assertSame(genericCallback, op.getCallback()); + } + + private void assertBytes(byte[] bytes) { + assertTrue(Arrays.equals(testData, bytes)); + } + + @SuppressWarnings("unchecked") + private T assertOne(Class class1, + Collection ops) { + assertEquals(1, ops.size()); + Operation op = ops.iterator().next(); + return (T) op; + } + + protected T cloneOne(Class c, KeyedOperation t) { + return assertOne(c, ofact.clone(t)); + } + +} diff --git a/src/test/java/net/spy/memcached/ProtocolBaseCase.java b/src/test/java/net/spy/memcached/ProtocolBaseCase.java new file mode 100644 index 000000000..a49195f7d --- /dev/null +++ b/src/test/java/net/spy/memcached/ProtocolBaseCase.java @@ -0,0 +1,863 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.nio.ByteBuffer; +import java.net.SocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.compat.SyncThread; +import net.spy.memcached.ops.OperationErrorType; +import net.spy.memcached.ops.OperationException; +import net.spy.memcached.transcoders.SerializingTranscoder; +import net.spy.memcached.transcoders.Transcoder; + + +public abstract class ProtocolBaseCase extends ClientBaseCase { + + public void testAssertions() { + boolean caught=false; + try { + assert false; + } catch(AssertionError e) { + caught=true; + } + assertTrue("Assertions are not enabled!", caught); + } + + public void testGetStats() throws Exception { + Map> stats = client.getStats(); + System.out.println("Stats: " + stats); + assertEquals(1, stats.size()); + Map oneStat=stats.values().iterator().next(); + assertTrue(oneStat.containsKey("total_items")); + } + + public void testGetStatsSlabs() throws Exception { + // There needs to at least have been one value set or there may be + // no slabs to check. + client.set("slabinitializer", 0, "hi"); + Map> stats = client.getStats("slabs"); + System.out.println("Stats: " + stats); + assertEquals(1, stats.size()); + Map oneStat=stats.values().iterator().next(); + assertTrue(oneStat.containsKey("0:chunk_size")); + } + + public void testGetStatsSizes() throws Exception { + // Arcus does not support "stats sizes" + if (true) return; + // There needs to at least have been one value set or there may be + // no sizes to check. 
+ client.set("sizeinitializer", 0, "hi"); + Map> stats = client.getStats("sizes"); + System.out.println("Stats: " + stats); + assertEquals(1, stats.size()); + Map oneStat=stats.values().iterator().next(); + assertEquals("1", oneStat.get("96")); + } + + public void testGetStatsCacheDump() throws Exception { + // There needs to at least have been one value set or there + // won't be anything to dump + client.set("dumpinitializer", 0, "hi"); + Map> stats = + client.getStats("cachedump 0 10000"); + System.out.println("Stats: " + stats); + assertEquals(1, stats.size()); + Map oneStat=stats.values().iterator().next(); + String val = oneStat.get("dumpinitializer"); + assertTrue(val + "doesn't match", val.matches("\\[acctime=\\d+, exptime=\\d+\\]")); + } + + public void testDelayedFlush() throws Exception { + assertNull(client.get("test1")); + client.set("test1", 5, "test1value"); + client.set("test2", 5, "test2value"); + assertEquals("test1value", client.get("test1")); + assertEquals("test2value", client.get("test2")); + client.flush(2); + Thread.sleep(2100); + assertNull(client.get("test1")); + assertNull(client.get("test2")); + } + + public void testNoop() { + // This runs through the startup/flush cycle + } + + public void testDoubleShutdown() { + client.shutdown(); + client.shutdown(); + } + + public void testSimpleGet() throws Exception { + assertNull(client.get("test1")); + client.set("test1", 5, "test1value"); + assertEquals("test1value", client.get("test1")); + } + + public void testSimpleCASGets() throws Exception { + assertNull(client.gets("test1")); + client.set("test1", 5, "test1value"); + assertEquals("test1value", client.gets("test1").getValue()); + } + + public void testCAS() throws Exception { + final String key="castestkey"; + // First, make sure it doesn't work for a non-existing value. + assertSame("Expected error CASing with no existing value.", + CASResponse.NOT_FOUND, + client.cas(key, 0x7fffffffffL, "bad value")); + + // OK, stick a value in here. 
+ assertTrue(client.add(key, 5, "original value").get()); + CASValue getsVal = client.gets(key); + assertEquals("original value", getsVal.getValue()); + + // Now try it with an existing value, but wrong CAS id + assertSame("Expected error CASing with invalid id", + CASResponse.EXISTS, + client.cas(key, getsVal.getCas() + 1, "broken value")); + // Validate the original value is still in tact. + assertEquals("original value", getsVal.getValue()); + + // OK, now do a valid update + assertSame("Expected successful CAS with correct id (" + + getsVal.getCas() + ")", + CASResponse.OK, + client.cas(key, getsVal.getCas(), "new value")); + assertEquals("new value", client.get(key)); + + // Test a CAS replay + assertSame("Expected unsuccessful CAS with replayed id", + CASResponse.EXISTS, + client.cas(key, getsVal.getCas(), "crap value")); + assertEquals("new value", client.get(key)); + } + + public void testReallyLongCASId() throws Exception { + String key = "this-is-my-key"; + assertSame("Expected error CASing with no existing value.", + CASResponse.NOT_FOUND, + client.cas(key, 9223372036854775807l, "bad value")); + } + + public void testExtendedUTF8Key() throws Exception { + String key="\u2013\u00ba\u2013\u220f\u2014\u00c4"; + assertNull(client.get(key)); + client.set(key, 5, "test1value"); + assertEquals("test1value", client.get(key)); + } + + public void testInvalidKey1() throws Exception { + try { + client.get("key with spaces"); + fail("Expected IllegalArgumentException getting key with spaces"); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKey2() throws Exception { + try { + StringBuilder longKey=new StringBuilder(); + for(int i=0; i<251; i++) { + longKey.append("a"); + } + client.get(longKey.toString()); + fail("Expected IllegalArgumentException getting too long of a key"); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKey3() throws Exception { + try { + Object val=client.get("Key\n"); + 
fail("Expected IllegalArgumentException, got " + val); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKey4() throws Exception { + try { + Object val=client.get("Key\r"); + fail("Expected IllegalArgumentException, got " + val); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKey5() throws Exception { + try { + Object val=client.get("Key\0"); + fail("Expected IllegalArgumentException, got " + val); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKeyBlank() throws Exception { + try { + Object val=client.get(""); + fail("Expected IllegalArgumentException, got " + val); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testInvalidKeyBulk() throws Exception { + try { + Object val=client.getBulk("Key key2"); + fail("Expected IllegalArgumentException, got " + val); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testParallelSetGet() throws Throwable { + int cnt=SyncThread.getDistinctResultCount(10, new Callable(){ + public Boolean call() throws Exception { + for(int i=0; i<10; i++) { + client.set("test" + i, 5, "value" + i); + assertEquals("value" + i, client.get("test" + i)); + } + for(int i=0; i<10; i++) { + assertEquals("value" + i, client.get("test" + i)); + } + return Boolean.TRUE; + }}); + assertEquals(1, cnt); + } + + public void testParallelSetMultiGet() throws Throwable { + int cnt=SyncThread.getDistinctResultCount(10, new Callable(){ + public Boolean call() throws Exception { + for(int i=0; i<10; i++) { + client.set("test" + i, 5, "value" + i); + assertEquals("value" + i, client.get("test" + i)); + } + Map m=client.getBulk("test0", "test1", "test2", + "test3", "test4", "test5", "test6", "test7", "test8", + "test9", "test10"); // Yes, I intentionally ran over. 
+ for(int i=0; i<10; i++) { + assertEquals("value" + i, m.get("test" + i)); + } + return Boolean.TRUE; + }}); + assertEquals(1, cnt); + } + + public void testParallelSetAutoMultiGet() throws Throwable { + int cnt=SyncThread.getDistinctResultCount(10, new Callable(){ + public Boolean call() throws Exception { + client.set("testparallel", 5, "parallelvalue"); + for(int i=0; i<10; i++) { + assertEquals("parallelvalue", client.get("testparallel")); + } + return Boolean.TRUE; + }}); + assertEquals(1, cnt); + } + + public void testAdd() throws Exception { + assertNull(client.get("test1")); + assertTrue(client.set("test1", 5, "test1value").get()); + assertEquals("test1value", client.get("test1")); + assertFalse(client.add("test1", 5, "ignoredvalue").get()); + // Should return the original value + assertEquals("test1value", client.get("test1")); + } + + public void testAddWithTranscoder() throws Exception { + Transcoder t=new TestTranscoder(); + assertNull(client.get("test1", t)); + assertTrue(client.set("test1", 5, "test1value", t).get()); + assertEquals("test1value", client.get("test1", t)); + assertFalse(client.add("test1", 5, "ignoredvalue", t).get()); + // Should return the original value + assertEquals("test1value", client.get("test1", t)); + } + + public void testAddNotSerializable() throws Exception { + try { + client.add("t1", 5, new Object()); + fail("expected illegal argument exception"); + } catch(IllegalArgumentException e) { + assertEquals("Non-serializable object, cause=java.lang.Object", e.getMessage()); + } + } + + public void testSetNotSerializable() throws Exception { + try { + client.set("t1", 5, new Object()); + fail("expected illegal argument exception"); + } catch(IllegalArgumentException e) { + assertEquals("Non-serializable object, cause=java.lang.Object", e.getMessage()); + } + } + + public void testReplaceNotSerializable() throws Exception { + try { + client.replace("t1", 5, new Object()); + fail("expected illegal argument exception"); + } 
catch(IllegalArgumentException e) { + assertEquals("Non-serializable object, cause=java.lang.Object", e.getMessage()); + } + } + + public void testUpdate() throws Exception { + assertNull(client.get("test1")); + client.replace("test1", 5, "test1value"); + assertNull(client.get("test1")); + } + + public void testUpdateWithTranscoder() throws Exception { + Transcoder t=new TestTranscoder(); + assertNull(client.get("test1", t)); + client.replace("test1", 5, "test1value", t); + assertNull(client.get("test1", t)); + } + + // Just to make sure the sequence is being handled correctly + public void testMixedSetsAndUpdates() throws Exception { + Collection> futures=new ArrayList>(); + Collection keys=new ArrayList(); + for(int i=0; i<100; i++) { + String key="k" + i; + futures.add(client.set(key, 10, key)); + futures.add(client.add(key, 10, "a" + i)); + keys.add(key); + } + Map m=client.getBulk(keys); + assertEquals(100, m.size()); + for(Map.Entry me : m.entrySet()) { + assertEquals(me.getKey(), me.getValue()); + } + for(Iterator> i=futures.iterator();i.hasNext();) { + assertTrue(i.next().get()); + assertFalse(i.next().get()); + } + } + + public void testGetBulk() throws Exception { + Collection keys=Arrays.asList("test1", "test2", "test3"); + assertEquals(0, client.getBulk(keys).size()); + client.set("test1", 5, "val1"); + client.set("test2", 5, "val2"); + Map vals=client.getBulk(keys); + assertEquals(2, vals.size()); + assertEquals("val1", vals.get("test1")); + assertEquals("val2", vals.get("test2")); + } + + public void testGetBulkVararg() throws Exception { + assertEquals(0, client.getBulk("test1", "test2", "test3").size()); + client.set("test1", 5, "val1"); + client.set("test2", 5, "val2"); + Map vals=client.getBulk("test1", "test2", "test3"); + assertEquals(2, vals.size()); + assertEquals("val1", vals.get("test1")); + assertEquals("val2", vals.get("test2")); + } + + public void testGetBulkVarargWithTranscoder() throws Exception { + Transcoder t=new TestTranscoder(); + 
assertEquals(0, client.getBulk(t, "test1", "test2", "test3").size()); + client.set("test1", 5, "val1", t); + client.set("test2", 5, "val2", t); + Map vals=client.getBulk(t, "test1", "test2", "test3"); + assertEquals(2, vals.size()); + assertEquals("val1", vals.get("test1")); + assertEquals("val2", vals.get("test2")); + } + + public void testAsyncGetBulkVarargWithTranscoder() throws Exception { + Transcoder t=new TestTranscoder(); + assertEquals(0, client.getBulk(t, "test1", "test2", "test3").size()); + client.set("test1", 5, "val1", t); + client.set("test2", 5, "val2", t); + Future> vals=client.asyncGetBulk(t, + "test1", "test2", "test3"); + assertEquals(2, vals.get().size()); + assertEquals("val1", vals.get().get("test1")); + assertEquals("val2", vals.get().get("test2")); + } + + public void testAsyncGetBulkWithTranscoderIterator() throws Exception { + ArrayList keys = new ArrayList(); + keys.add("test1"); + keys.add("test2"); + keys.add("test3"); + + ArrayList> tcs = new ArrayList>(keys.size()); + for (String key : keys) { + tcs.add(new TestWithKeyTranscoder(key)); + } + + // Any transcoders listed after list of keys should be + // ignored. + for (String key : keys) { + tcs.add(new TestWithKeyTranscoder(key)); + } + + assertEquals(0, client.asyncGetBulk(keys, tcs.listIterator()).get().size()); + + client.set(keys.get(0), 5, "val1", tcs.get(0)); + client.set(keys.get(1), 5, "val2", tcs.get(1)); + Future> vals=client.asyncGetBulk(keys, tcs.listIterator()); + assertEquals(2, vals.get().size()); + assertEquals("val1", vals.get().get(keys.get(0))); + assertEquals("val2", vals.get().get(keys.get(1))); + + // Set with one transcoder with the proper key and get + // with another transcoder with the wrong key. 
+ keys.add(0, "test4"); + Transcoder encodeTranscoder = new TestWithKeyTranscoder(keys.get(0)); + client.set(keys.get(0), 5, "val4", encodeTranscoder).get(); + + Transcoder decodeTranscoder = new TestWithKeyTranscoder("not " + keys.get(0)); + tcs.add(0, decodeTranscoder); + try { + client.asyncGetBulk(keys, tcs.listIterator()).get(); + fail("Expected ExecutionException caused by key mismatch"); + } catch (java.util.concurrent.ExecutionException e) { + // pass + } + } + + public void testAvailableServers() { + client.getVersions(); + assertEquals(new ArrayList( + Collections.singleton(getExpectedVersionSource())), + stringify(client.getAvailableServers())); + } + + public void testUnavailableServers() { + client.getVersions(); + assertEquals(Collections.emptyList(), client.getUnavailableServers()); + } + + protected abstract String getExpectedVersionSource(); + + public void testGetVersions() throws Exception { + Map vs=client.getVersions(); + assertEquals(1, vs.size()); + Map.Entry me=vs.entrySet().iterator().next(); + assertEquals(getExpectedVersionSource(), me.getKey().toString()); + assertNotNull(me.getValue()); + } + + public void testNonexistentMutate() throws Exception { + assertEquals(-1, client.incr("nonexistent", 1)); + assertEquals(-1, client.decr("nonexistent", 1)); + } + + public void testMutateWithDefault() throws Exception { + assertEquals(3, client.incr("mtest", 1, 3)); + assertEquals(4, client.incr("mtest", 1, 3)); + assertEquals(3, client.decr("mtest", 1, 9)); + assertEquals(9, client.decr("mtest2", 1, 9)); + } + + public void testMutateWithDefaultAndExp() throws Exception { + assertEquals(3, client.incr("mtest", 1, 3, 1)); + assertEquals(4, client.incr("mtest", 1, 3, 1)); + assertEquals(3, client.decr("mtest", 1, 9, 1)); + assertEquals(9, client.decr("mtest2", 1, 9, 1)); + Thread.sleep(2000); + assertNull(client.get("mtest")); + } + + public void testAsyncIncrement() throws Exception { + String k="async-incr"; + client.set(k, 0, "5"); + Future f = 
client.asyncIncr(k, 1); + assertEquals(6, (long)f.get()); + } + + public void testAsyncIncrementNonExistent() throws Exception { + String k="async-incr-non-existent"; + Future f = client.asyncIncr(k, 1); + assertEquals(-1, (long)f.get()); + } + + public void testAsyncDecrement() throws Exception { + String k="async-decr"; + client.set(k, 0, "5"); + Future f = client.asyncDecr(k, 1); + assertEquals(4, (long)f.get()); + } + + public void testAsyncDecrementNonExistent() throws Exception { + String k="async-decr-non-existent"; + Future f = client.asyncDecr(k, 1); + assertEquals(-1, (long)f.get()); + } + + public void testConcurrentMutation() throws Throwable { + int num=SyncThread.getDistinctResultCount(10, new Callable(){ + public Long call() throws Exception { + return client.incr("mtest", 1, 11); + }}); + assertEquals(10, num); + } + + public void testImmediateDelete() throws Exception { + assertNull(client.get("test1")); + client.set("test1", 5, "test1value"); + assertEquals("test1value", client.get("test1")); + client.delete("test1"); + assertNull(client.get("test1")); + } + + public void testFlush() throws Exception { + assertNull(client.get("test1")); + client.set("test1", 5, "test1value"); + client.set("test2", 5, "test2value"); + assertEquals("test1value", client.get("test1")); + assertEquals("test2value", client.get("test2")); + assertTrue(client.flush().get()); + assertNull(client.get("test1")); + assertNull(client.get("test2")); + } + + public void testGracefulShutdown() throws Exception { + for(int i=0; i<1000; i++) { + client.set("t" + i, 10, i); + } + assertTrue("Couldn't shut down within five seconds", + client.shutdown(5, TimeUnit.SECONDS)); + + // Get a new client + initClient(); + Collection keys=new ArrayList(); + for(int i=0; i<1000; i++) { + keys.add("t" + i); + } + Map m=client.getBulk(keys); + assertEquals(1000, m.size()); + for(int i=0; i<1000; i++) { + assertEquals(i, m.get("t" + i)); + } + } + + public void testSyncGetTimeouts() throws 
Exception { + final String key="timeoutTestKey"; + final String value="timeoutTestValue"; + // Shutting down the default client to get one with a short timeout. + assertTrue("Couldn't shut down within five seconds", + client.shutdown(5, TimeUnit.SECONDS)); + + initClient(new DefaultConnectionFactory() { + @Override + public long getOperationTimeout() { + return 1; + } + }); + + client.set(key, 0, value); + try { + for(int i=0; i<1000000; i++) { + client.get(key); + } + throw new Exception("Didn't get a timeout."); + } catch(OperationTimeoutException e) { + System.out.println("Got a timeout."); + } + if(value.equals(client.asyncGet(key).get(1, TimeUnit.SECONDS))) { + System.out.println("Got the right value."); + } else { + throw new Exception("Didn't get the expected value."); + } + } + + public void xtestGracefulShutdownTooSlow() throws Exception { + for(int i=0; i<10000; i++) { + client.set("t" + i, 10, i); + } + assertFalse("Weird, shut down too fast", + client.shutdown(1, TimeUnit.MILLISECONDS)); + + try { + Map m = client.getVersions(); + fail("Expected failure, got " + m); + } catch(IllegalStateException e) { + assertEquals("Shutting down", e.getMessage()); + } + + // Get a new client + initClient(); + } + + public void testStupidlyLargeSetAndSizeOverride() throws Exception { + Random r=new Random(); + SerializingTranscoder st=new SerializingTranscoder(Integer.MAX_VALUE); + + st.setCompressionThreshold(Integer.MAX_VALUE); + + byte data[]=new byte[10*1024*1024]; + r.nextBytes(data); + + try { + client.set("bigassthing", 60, data, st).get(); + fail("Didn't fail setting bigass thing."); + } catch(ExecutionException e) { + e.printStackTrace(); + OperationException oe=(OperationException)e.getCause(); + assertSame(OperationErrorType.SERVER, oe.getType()); + } + + // But I should still be able to do something. 
+ client.set("k", 5, "Blah"); + assertEquals("Blah", client.get("k")); + } + + public void testStupidlyLargeSet() throws Exception { + Random r=new Random(); + SerializingTranscoder st=new SerializingTranscoder(); + st.setCompressionThreshold(Integer.MAX_VALUE); + + byte data[]=new byte[10*1024*1024]; + r.nextBytes(data); + + try { + client.set("bigassthing", 60, data, st).get(); + fail("Didn't fail setting bigass thing."); + } catch(IllegalArgumentException e) { + assertEquals("Cannot cache data larger than " + + CachedData.MAX_SIZE + " bytes " + + "(you tried to cache a " + data.length + " byte object)", + e.getMessage()); + } + + // But I should still be able to do something. + client.set("k", 5, "Blah"); + assertEquals("Blah", client.get("k")); + } + + public void testQueueAfterShutdown() throws Exception { + client.shutdown(); + try { + Object o=client.get("k"); + fail("Expected IllegalStateException, got " + o); + } catch(IllegalStateException e) { + // OK + } finally { + initClient(); // init for tearDown + } + } + + public void testMultiReqAfterShutdown() throws Exception { + client.shutdown(); + try { + Map m=client.getBulk("k1", "k2", "k3"); + fail("Expected IllegalStateException, got " + m); + } catch(IllegalStateException e) { + // OK + } finally { + initClient(); // init for tearDown + } + } + + public void testBroadcastAfterShutdown() throws Exception { + client.shutdown(); + try { + Future f=client.flush(); + fail("Expected IllegalStateException, got " + f.get()); + } catch(IllegalStateException e) { + // OK + } finally { + initClient(); // init for tearDown + } + } + + public void testABunchOfCancelledOperations() throws Exception { + final String k="bunchOCancel"; + Collection> futures=new ArrayList>(); + for(int i=0; i<1000; i++) { + futures.add(client.set(k, 5, "xval")); + futures.add(client.asyncGet(k)); + } + Future sf=client.set(k, 5, "myxval"); + Future gf=client.asyncGet(k); + for(Future f : futures) { + f.cancel(true); + } + 
assertTrue(sf.get()); + assertEquals("myxval", gf.get()); + } + + public void testUTF8Key() throws Exception { + final String key = "junit.Здравствуйте." + System.currentTimeMillis(); + final String value = "Skiing rocks if you can find the time to go!"; + + assertTrue(client.set(key, 6000, value).get()); + Object output = client.get(key); + assertNotNull("output is null", output); + assertEquals("output is not equal", value, output); + } + + public void testUTF8KeyDelete() throws Exception { + final String key = "junit.Здравствуйте." + System.currentTimeMillis(); + final String value = "Skiing rocks if you can find the time to go!"; + + assertTrue(client.set(key, 6000, value).get()); + assertTrue(client.delete(key).get()); + assertNull(client.get(key)); + } + + public void testUTF8MultiGet() throws Exception { + final String value = "Skiing rocks if you can find the time to go!"; + Collection keys=new ArrayList(); + for(int i=0; i<50; i++) { + final String key = "junit.Здравствуйте." + + System.currentTimeMillis() + "." + i; + assertTrue(client.set(key, 6000, value).get()); + keys.add(key); + } + + Map vals = client.getBulk(keys); + assertEquals(keys.size(), vals.size()); + for(Object o : vals.values()) { + assertEquals(value, o); + } + assertTrue(keys.containsAll(vals.keySet())); + } + + public void testUTF8Value() throws Exception { + final String key = "junit.plaintext." 
+ System.currentTimeMillis(); + final String value = "Здравствуйте Здравствуйте Здравствуйте " + + "Skiing rocks if you can find the time to go!"; + + assertTrue(client.set(key, 6000, value).get()); + Object output = client.get(key); + assertNotNull("output is null", output); + assertEquals("output is not equal", value, output); + } + + public void testAppend() throws Exception { + final String key="append.key"; + assertTrue(client.set(key, 5, "test").get()); + assertTrue(client.append(0, key, "es").get()); + assertEquals("testes", client.get(key)); + } + + public void testPrepend() throws Exception { + final String key="prepend.key"; + assertTrue(client.set(key, 5, "test").get()); + assertTrue(client.prepend(0, key, "es").get()); + assertEquals("estest", client.get(key)); + } + + public void testAppendNoSuchKey() throws Exception { + final String key="append.missing"; + assertFalse(client.append(0, key, "es").get()); + assertNull(client.get(key)); + } + + public void testPrependNoSuchKey() throws Exception { + final String key="prepend.missing"; + assertFalse(client.prepend(0, key, "es").get()); + assertNull(client.get(key)); + } + + private static class TestTranscoder implements Transcoder { + private static final int flags=238885206; + + public String decode(CachedData d) { + assert d.getFlags() == flags + : "expected " + flags + " got " + d.getFlags(); + return new String(d.getData()); + } + + public CachedData encode(String o) { + return new CachedData(flags, o.getBytes(), getMaxSize()); + } + + public int getMaxSize() { + return CachedData.MAX_SIZE; + } + + public boolean asyncDecode(CachedData d) { + return false; + } + } + + private static class TestWithKeyTranscoder implements Transcoder { + private static final int flags=238885207; + + private final String key; + + TestWithKeyTranscoder(String k) { + key = k; + } + + public String decode(CachedData d) { + assert d.getFlags() == flags + : "expected " + flags + " got " + d.getFlags(); + + ByteBuffer bb = 
ByteBuffer.wrap(d.getData()); + + int keyLength = bb.getInt(); + byte[] keyBytes = new byte[keyLength]; + bb.get(keyBytes); + String k = new String(keyBytes); + + assertEquals(key, k); + + int valueLength = bb.getInt(); + byte[] valueBytes = new byte[valueLength]; + bb.get(valueBytes); + + return new String(valueBytes); + } + + public CachedData encode(String o) { + byte[] keyBytes = key.getBytes(); + byte[] valueBytes = o.getBytes(); + int length = 4 + keyBytes.length + 4 + valueBytes.length; + byte[] bytes = new byte[length]; + + ByteBuffer bb = ByteBuffer.wrap(bytes); + bb.putInt(keyBytes.length).put(keyBytes); + bb.putInt(valueBytes.length).put(valueBytes); + + return new CachedData(flags, bytes, getMaxSize()); + } + + public int getMaxSize() { + return CachedData.MAX_SIZE; + } + + public boolean asyncDecode(CachedData d) { + return false; + } + } +} diff --git a/src/test/java/net/spy/memcached/QueueOverflowTest.java b/src/test/java/net/spy/memcached/QueueOverflowTest.java new file mode 100644 index 000000000..4b07dd17b --- /dev/null +++ b/src/test/java/net/spy/memcached/QueueOverflowTest.java @@ -0,0 +1,131 @@ +package net.spy.memcached; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.ops.Operation; + +/** + * Test queue overflow. 
+ */ +public class QueueOverflowTest extends ClientBaseCase { + + @Override + protected void initClient() throws Exception { + + // We're creating artificially constrained queues with the explicit + // goal of overrunning them to verify the client will still be + // functional after such conditions occur. + initClient(new DefaultConnectionFactory(5, 1024) { + @Override + public MemcachedConnection createConnection( + List addrs) throws IOException { + MemcachedConnection rv = super.createConnection(addrs); + return rv; + } + @Override + public long getOperationTimeout() { + return 1000; + } + @Override + public BlockingQueue createOperationQueue() { + return new ArrayBlockingQueue(getOpQueueLen()); + } + @Override + public BlockingQueue createReadOperationQueue() { + return new ArrayBlockingQueue( + (int) (getOpQueueLen() * 1.1)); + } + @Override + public BlockingQueue createWriteOperationQueue() { + return createOperationQueue(); + } + @Override + public boolean shouldOptimize() { + return false; + } + @Override + public long getOpQueueMaxBlockTime() { + return 0; + } + + }); + } + + private void runOverflowTest(byte b[]) throws Exception { + Collection> c=new ArrayList>(); + try { + for(int i=0; i<1000; i++) { + c.add(client.set("k" + i, 0, b)); + } + fail("Didn't catch an illegal state exception"); + } catch(IllegalStateException e) { + // expected + } + try { + Thread.sleep(50); + for(Future f : c) { + f.get(1, TimeUnit.SECONDS); + } + } catch(TimeoutException e) { + // OK, at least we got one back. + } catch(ExecutionException e) { + // OK, at least we got one back. 
+ } + Thread.sleep(500); + assertTrue("Was not able to set a key after failure.", + client.set("kx", 0, "woo").get(10, TimeUnit.SECONDS)); + } + + public void testOverflowingInputQueue() throws Exception { + runOverflowTest(new byte[]{1}); + } + + public void testOverflowingWriteQueue() throws Exception { + byte[] b=new byte[8192]; + Random r=new Random(); + r.nextBytes(b); + runOverflowTest(b); + } + + public void testOverflowingReadQueue() throws Exception { + byte[] b=new byte[8192]; + Random r=new Random(); + r.nextBytes(b); + client.set("x", 0, b); + + Collection> c=new ArrayList>(); + try { + for(int i=0; i<1000; i++) { + c.add(client.asyncGet("x")); + } + fail("Didn't catch an illegal state exception"); + } catch(IllegalStateException e) { + // expected + } + try { + Thread.sleep(50); + for(Future f : c) { + assertTrue(Arrays.equals(b, + (byte[])f.get(5, TimeUnit.SECONDS))); + } + } catch(TimeoutException e) { + // OK, just want to make sure the client doesn't crash + } catch(ExecutionException e) { + // OK, at least we got one back. 
+ } + Thread.sleep(500); + assertTrue(client.set("kx", 0, "woo").get(5, TimeUnit.SECONDS)); + } +} diff --git a/src/test/java/net/spy/memcached/RedistributeFailureModeTest.java b/src/test/java/net/spy/memcached/RedistributeFailureModeTest.java new file mode 100644 index 000000000..dc4d205e1 --- /dev/null +++ b/src/test/java/net/spy/memcached/RedistributeFailureModeTest.java @@ -0,0 +1,69 @@ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + + +public class RedistributeFailureModeTest extends ClientBaseCase { + + private String serverList; + + @Override + protected void setUp() throws Exception { + serverList="127.0.0.1:11211 127.0.0.1:11311"; + super.setUp(); + } + + @Override + protected void tearDown() throws Exception { + serverList="127.0.0.1:11211"; + super.tearDown(); + } + + @Override + protected void initClient(ConnectionFactory cf) throws Exception { + client=new MemcachedClient(cf, AddrUtil.getAddresses(serverList)); + } + + @Override + protected void initClient() throws Exception { + initClient(new DefaultConnectionFactory() { + @Override + public FailureMode getFailureMode() { + return FailureMode.Redistribute; + } + }); + } + + @Override + protected void flushPause() throws InterruptedException { + Thread.sleep(100); + } + + // Just to make sure the sequence is being handled correctly + public void testMixedSetsAndUpdates() throws Exception { + Collection> futures=new ArrayList>(); + Collection keys=new ArrayList(); + Thread.sleep(100); + for(int i=0; i<100; i++) { + String key="k" + i; + futures.add(client.set(key, 10, key)); + futures.add(client.add(key, 10, "a" + i)); + keys.add(key); + } + Map m=client.getBulk(keys); + assertEquals(100, m.size()); + for(Map.Entry me : m.entrySet()) { + assertEquals(me.getKey(), me.getValue()); + } + for(Iterator> i=futures.iterator();i.hasNext();) { + 
assertTrue(i.next().get(10, TimeUnit.MILLISECONDS)); + assertFalse(i.next().get(10, TimeUnit.MILLISECONDS)); + } + System.err.println(getName() + " complete."); + } +} diff --git a/src/test/java/net/spy/memcached/TimeoutTest.java b/src/test/java/net/spy/memcached/TimeoutTest.java new file mode 100644 index 000000000..319b1507e --- /dev/null +++ b/src/test/java/net/spy/memcached/TimeoutTest.java @@ -0,0 +1,70 @@ +package net.spy.memcached; + +public class TimeoutTest extends ClientBaseCase { + + @Override + protected void tearDown() throws Exception { + // override teardown to avoid the flush phase + client.shutdown(); + } + + @Override + protected void initClient() throws Exception { + client=new MemcachedClient(new DefaultConnectionFactory() { + @Override + public long getOperationTimeout() { + return 1; + } + @Override + public FailureMode getFailureMode() { + return FailureMode.Retry; + }}, + AddrUtil.getAddresses("127.0.0.1:64213")); + } + + private void tryTimeout(String name, Runnable r) { + try { + r.run(); + fail("Expected timeout in " + name); + } catch(OperationTimeoutException e) { + // pass + } + } + + public void testCasTimeout() { + tryTimeout("cas", new Runnable() {public void run() { + client.cas("k", 1, "blah"); + }}); + } + + public void testGetsTimeout() { + tryTimeout("gets", new Runnable() {public void run() { + client.gets("k"); + }}); + } + + public void testGetTimeout() { + tryTimeout("get", new Runnable() {public void run() { + client.get("k"); + }}); + } + + public void testGetBulkTimeout() { + tryTimeout("getbulk", new Runnable() {public void run() { + client.getBulk("k", "k2"); + }}); + } + + public void testIncrTimeout() { + tryTimeout("incr", new Runnable() {public void run() { + client.incr("k", 1); + }}); + } + + public void testIncrWithDefTimeout() { + tryTimeout("incrWithDef", new Runnable() {public void run() { + client.incr("k", 1, 5); + }}); + } + +} diff --git a/src/test/java/net/spy/memcached/compat/BaseMockCase.java 
b/src/test/java/net/spy/memcached/compat/BaseMockCase.java new file mode 100644 index 000000000..09cd76c81 --- /dev/null +++ b/src/test/java/net/spy/memcached/compat/BaseMockCase.java @@ -0,0 +1,14 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.compat; + +import org.jmock.MockObjectTestCase; + +/** + * Base test case for mock object tests. + */ +public abstract class BaseMockCase extends MockObjectTestCase { + + // Nothing special needed here. + +} diff --git a/src/test/java/net/spy/memcached/compat/log/LoggingTest.java b/src/test/java/net/spy/memcached/compat/log/LoggingTest.java new file mode 100644 index 000000000..977d102d1 --- /dev/null +++ b/src/test/java/net/spy/memcached/compat/log/LoggingTest.java @@ -0,0 +1,147 @@ +// Copyright (c) 2005 Dustin Sallings + +package net.spy.memcached.compat.log; + +import junit.framework.TestCase; + +// XXX: This really needs to get log4j configured first. + +/** + * Make sure logging is enabled. + */ +public class LoggingTest extends TestCase { + + private Logger logger=null; + + /** + * Get an instance of LoggingTest. + */ + public LoggingTest(String name) { + super(name); + } + + /** + * Set up logging. + */ + @Override + public void setUp() { + logger=LoggerFactory.getLogger(getClass()); + } + + /** + * Make sure logging is enabled. + */ + public void testDebugLogging() { +// assertTrue("Debug logging is not enabled", logger.isDebugEnabled()); + logger.debug("debug message"); + } + + /** + * Make sure info is enabled, and test it. + */ + public void testInfoLogging() { + assertTrue(logger.isInfoEnabled()); + logger.info("info message"); + } + + /** + * Test other log stuff. 
+ */ + public void testOtherLogging() { + logger.warn("warn message"); + logger.warn("test %s", "message"); + logger.error("error message"); + logger.error("test %s", "message"); + logger.fatal("fatal message"); + logger.fatal("test %s", "message"); + logger.log(null, "test null", null); + assertEquals(getClass().getName(), logger.getName()); + } + + /** + * Make sure we're using log4j. + */ + public void testLog4j() { +// Logger l=LoggerFactory.getLogger(getClass()); +// assertEquals("net.spy.compat.log.Log4JLogger", l.getClass().getName()); + } + + /** + * Test the sun logger. + */ + public void testSunLogger() { + Logger l=new SunLogger(getClass().getName()); + assertFalse(l.isDebugEnabled()); + l.debug("debug message"); + assertTrue(l.isInfoEnabled()); + l.info("info message"); + l.warn("warn message"); + l.error("error message"); + l.fatal("fatal message"); + l.fatal("fatal message with exception", new Exception()); + l.log(null, "test null", null); + l.log(null, "null message with exception and no requestor", + new Exception()); + } + + /** + * Test the default logger. + */ + public void testMyLogger() { + Logger l=new DefaultLogger(getClass().getName()); + assertFalse(l.isDebugEnabled()); + l.debug("debug message"); + assertTrue(l.isInfoEnabled()); + l.info("info message"); + l.warn("warn message"); + l.error("error message"); + l.fatal("fatal message"); + l.fatal("fatal message with exception", new Exception()); + l.log(null, "test null", null); + l.log(null, "null message with exception and no requestor", + new Exception()); + + try { + l=new DefaultLogger(null); + fail("Allowed me to create a logger with null name: " + l); + } catch(NullPointerException e) { + assertEquals("Logger name may not be null.", e.getMessage()); + } + } + + /** + * Test stringing levels. 
+ */ + public void testLevelStrings() { + assertEquals("{LogLevel: DEBUG}", String.valueOf(Level.DEBUG)); + assertEquals("{LogLevel: INFO}", String.valueOf(Level.INFO)); + assertEquals("{LogLevel: WARN}", String.valueOf(Level.WARN)); + assertEquals("{LogLevel: ERROR}", String.valueOf(Level.ERROR)); + assertEquals("{LogLevel: FATAL}", String.valueOf(Level.FATAL)); + assertEquals("DEBUG", Level.DEBUG.name()); + assertEquals("INFO", Level.INFO.name()); + assertEquals("WARN", Level.WARN.name()); + assertEquals("ERROR", Level.ERROR.name()); + assertEquals("FATAL", Level.FATAL.name()); + } + + /** + * Test picking up an exception argument. + */ + public void testExceptionArg() throws Exception { + Object[] args=new Object[]{"a", 42, new Exception("test")}; + Throwable t=((AbstractLogger)logger).getThrowable(args); + assertNotNull(t); + assertEquals("test", t.getMessage()); + } + + /** + * Test when the last argument is not an exception. + */ + public void testNoExceptionArg() throws Exception { + Object[] args=new Object[]{"a", 42, new Exception("test"), "x"}; + Throwable t=((AbstractLogger)logger).getThrowable(args); + assertNull(t); + } + +} diff --git a/src/test/java/net/spy/memcached/internal/CheckedOperationTimeoutExceptionTest.java b/src/test/java/net/spy/memcached/internal/CheckedOperationTimeoutExceptionTest.java new file mode 100644 index 000000000..82489082c --- /dev/null +++ b/src/test/java/net/spy/memcached/internal/CheckedOperationTimeoutExceptionTest.java @@ -0,0 +1,84 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.internal; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; + +import junit.framework.TestCase; +import net.spy.memcached.MockMemcachedNode; +import net.spy.memcached.ops.Operation; +import net.spy.memcached.protocol.BaseOperationImpl; + +public class CheckedOperationTimeoutExceptionTest extends TestCase { + + public void testSingleOperation() { + Operation op = buildOp(11211); + assertEquals(CheckedOperationTimeoutException.class.getName() + + ": test - failing node: localhost:11211 [WRITING] [MOCK_STATE]", + new CheckedOperationTimeoutException("test", op).toString()); + } + + public void testNullNode() { + Operation op = new TestOperation(); + assertEquals(CheckedOperationTimeoutException.class.getName() + + ": test - failing node: [WRITING]", + new CheckedOperationTimeoutException("test", op).toString()); + } + + public void testNullOperation() { + assertEquals(CheckedOperationTimeoutException.class.getName() + + ": test - failing node: ", + new CheckedOperationTimeoutException("test", + (Operation)null).toString()); + } + + + public void testMultipleOperation() { + Collection ops = new ArrayList(); + ops.add(buildOp(11211)); + ops.add(buildOp(64212)); + assertEquals(CheckedOperationTimeoutException.class.getName() + + ": test - failing nodes: localhost:11211 [WRITING] [MOCK_STATE], localhost:64212 [WRITING] [MOCK_STATE]", + new CheckedOperationTimeoutException("test", ops).toString()); + } + + private 
TestOperation buildOp(int portNum) { + TestOperation op = new TestOperation(); + MockMemcachedNode node = new MockMemcachedNode( + InetSocketAddress.createUnresolved("localhost", portNum)); + op.setHandlingNode(node); + return op; + } + + static class TestOperation extends BaseOperationImpl implements Operation { + + @Override + public void initialize() { + throw new RuntimeException("Not implemented."); + } + + @Override + public void readFromBuffer(ByteBuffer data) throws IOException { + throw new RuntimeException("Not implemented"); + } + + } +} diff --git a/src/test/java/net/spy/memcached/internal/SingleElementInfiniteIteratorTest.java b/src/test/java/net/spy/memcached/internal/SingleElementInfiniteIteratorTest.java new file mode 100644 index 000000000..6d0814961 --- /dev/null +++ b/src/test/java/net/spy/memcached/internal/SingleElementInfiniteIteratorTest.java @@ -0,0 +1,31 @@ +package net.spy.memcached.internal; + +import java.lang.UnsupportedOperationException; + +import junit.framework.TestCase; + +public class SingleElementInfiniteIteratorTest extends TestCase { + private static final String CONSTANT = "foo"; + private SingleElementInfiniteIterator iterator; + + @Override + protected void setUp() { + iterator = new SingleElementInfiniteIterator(CONSTANT); + } + + public void testHasNextAndNext() { + for (int i=0; i<100; ++i) { + assertTrue(iterator.hasNext()); + assertSame(CONSTANT, iterator.next()); + } + } + + public void testRemove() { + try { + iterator.remove(); + fail("Expected UnsupportedOperationException on a remove."); + } + catch (UnsupportedOperationException e) { + } + } +} diff --git a/src/test/java/net/spy/memcached/protocol/ascii/BaseOpTest.java b/src/test/java/net/spy/memcached/protocol/ascii/BaseOpTest.java new file mode 100644 index 000000000..6e0c55cf5 --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/ascii/BaseOpTest.java @@ -0,0 +1,138 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.protocol.ascii; + 
+import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.LinkedList; +import java.util.List; + +import net.spy.memcached.compat.BaseMockCase; + +/** + * Test the basic operation buffer handling stuff. + */ +public class BaseOpTest extends BaseMockCase { + + public void testAssertions() { + try { + assert false; + fail("Assertions are not enabled."); + } catch(AssertionError e) { + // OK + } + } + + public void testDataReadType() throws Exception { + SimpleOp op=new SimpleOp(OperationReadType.DATA); + assertSame(OperationReadType.DATA, op.getReadType()); + // Make sure lines aren't handled + try { + op.handleLine("x"); + fail("Handled a line in data mode"); + } catch(AssertionError e) { + // ok + } + op.setBytesToRead(2); + op.handleRead(ByteBuffer.wrap("hi".getBytes())); + } + + public void testLineReadType() throws Exception { + SimpleOp op=new SimpleOp(OperationReadType.LINE); + assertSame(OperationReadType.LINE, op.getReadType()); + // Make sure lines aren't handled + try { + op.handleRead(ByteBuffer.allocate(3)); + fail("Handled data in line mode"); + } catch(AssertionError e) { + // ok + } + op.handleLine("x"); + } + + public void testLineParser() throws Exception { + String input="This is a multiline string\r\nhere is line two\r\n"; + ByteBuffer b=ByteBuffer.wrap(input.getBytes()); + SimpleOp op=new SimpleOp(OperationReadType.LINE); + op.linesToRead=2; + op.readFromBuffer(b); + assertEquals("This is a multiline string", op.getLines().get(0)); + assertEquals("here is line two", op.getLines().get(1)); + op.setBytesToRead(2); + op.readFromBuffer(ByteBuffer.wrap("xy".getBytes())); + byte[] expected={'x', 'y'}; + assertTrue("Expected " + Arrays.toString(expected) + " but got " + + Arrays.toString(op.getCurentBytes()), + Arrays.equals(expected, op.getCurentBytes())); + } + + public void testPartialLine() throws Exception { + String input1="this is a "; + String input2="test\r\n"; + ByteBuffer b=ByteBuffer.allocate(20); + SimpleOp op=new 
SimpleOp(OperationReadType.LINE); + + b.put(input1.getBytes()); + b.flip(); + op.readFromBuffer(b); + assertNull(op.getCurrentLine()); + b.clear(); + b.put(input2.getBytes()); + b.flip(); + op.readFromBuffer(b); + assertEquals("this is a test", op.getCurrentLine()); + } + + private static class SimpleOp extends OperationImpl { + + private final LinkedList lines=new LinkedList(); + private byte[] currentBytes=null; + private int bytesToRead=0; + public int linesToRead=1; + + public SimpleOp(OperationReadType t) { + setReadType(t); + } + + public void setBytesToRead(int to) { + bytesToRead=to; + } + + public String getCurrentLine() { + return lines.isEmpty()?null:lines.getLast(); + } + + public List getLines() { + return lines; + } + + public byte[] getCurentBytes() { + return currentBytes; + } + + @Override + public void handleLine(String line) { + assert getReadType() == OperationReadType.LINE; + lines.add(line); + if(--linesToRead == 0) { + setReadType(OperationReadType.DATA); + } + } + + @Override + public void handleRead(ByteBuffer data) { + assert getReadType() == OperationReadType.DATA; + assert bytesToRead > 0; + if(bytesToRead > 0) { + currentBytes=new byte[bytesToRead]; + data.get(currentBytes); + } + } + + @Override + public void initialize() { + setBuffer(ByteBuffer.allocate(0)); + } + + } +} diff --git a/src/test/java/net/spy/memcached/protocol/ascii/ExtensibleOperationImpl.java b/src/test/java/net/spy/memcached/protocol/ascii/ExtensibleOperationImpl.java new file mode 100644 index 000000000..38770a9e4 --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/ascii/ExtensibleOperationImpl.java @@ -0,0 +1,18 @@ +package net.spy.memcached.protocol.ascii; + +import net.spy.memcached.ops.OperationCallback; + +/** + * For testing, allow subclassing of the operation impl. 
+ */ +public abstract class ExtensibleOperationImpl extends OperationImpl { + + public ExtensibleOperationImpl() { + super(); + } + + public ExtensibleOperationImpl(OperationCallback cb) { + super(cb); + } + +} diff --git a/src/test/java/net/spy/memcached/protocol/ascii/OperationExceptionTest.java b/src/test/java/net/spy/memcached/protocol/ascii/OperationExceptionTest.java new file mode 100644 index 000000000..34df1fe2e --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/ascii/OperationExceptionTest.java @@ -0,0 +1,41 @@ +package net.spy.memcached.protocol.ascii; + +import junit.framework.TestCase; +import net.spy.memcached.ops.OperationErrorType; +import net.spy.memcached.ops.OperationException; + +/** + * Test operation exception constructors and accessors and stuff. + */ +public class OperationExceptionTest extends TestCase { + + public void testEmpty() { + OperationException oe=new OperationException(); + assertSame(OperationErrorType.GENERAL, oe.getType()); + assertEquals("OperationException: GENERAL", String.valueOf(oe)); + } + + public void testServer() { + OperationException oe=new OperationException( + OperationErrorType.SERVER, "SERVER_ERROR figures"); + assertSame(OperationErrorType.SERVER, oe.getType()); + assertEquals("OperationException: SERVER: SERVER_ERROR figures", + String.valueOf(oe)); + } + + public void testClient() { + OperationException oe=new OperationException( + OperationErrorType.CLIENT, "CLIENT_ERROR nope"); + assertSame(OperationErrorType.CLIENT, oe.getType()); + assertEquals("OperationException: CLIENT: CLIENT_ERROR nope", + String.valueOf(oe)); + } + + public void testGeneral() { + // General type doesn't have additional info + OperationException oe=new OperationException( + OperationErrorType.GENERAL, "GENERAL wtf"); + assertSame(OperationErrorType.GENERAL, oe.getType()); + assertEquals("OperationException: GENERAL", String.valueOf(oe)); + } +} diff --git 
a/src/test/java/net/spy/memcached/protocol/ascii/OperationFactoryTest.java b/src/test/java/net/spy/memcached/protocol/ascii/OperationFactoryTest.java new file mode 100644 index 000000000..490f8ce66 --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/ascii/OperationFactoryTest.java @@ -0,0 +1,49 @@ +package net.spy.memcached.protocol.ascii; + +import net.spy.memcached.OperationFactory; +import net.spy.memcached.OperationFactoryTestBase; +import net.spy.memcached.ops.MutatorOperation; +import net.spy.memcached.ops.Mutator; + +public class OperationFactoryTest extends OperationFactoryTestBase { + + @Override + protected OperationFactory getOperationFactory() { + return new AsciiOperationFactory(); + } + + @Override + public void testMutatorOperationIncrCloning() { + int exp = 823862; + long def = 28775; + int by = 7735; + MutatorOperation op = ofact.mutate(Mutator.incr, TEST_KEY, by, def, + exp, genericCallback); + + MutatorOperation op2 = cloneOne(MutatorOperation.class, op); + assertKey(op2); + assertEquals(-1, op2.getExpiration()); + assertEquals(-1, op2.getDefault()); + assertEquals(by, op2.getBy()); + assertSame(Mutator.incr, op2.getType()); + assertCallback(op2); + } + + @Override + public void testMutatorOperationDecrCloning() { + int exp = 823862; + long def = 28775; + int by = 7735; + MutatorOperation op = ofact.mutate(Mutator.decr, TEST_KEY, by, def, + exp, genericCallback); + + MutatorOperation op2 = cloneOne(MutatorOperation.class, op); + assertKey(op2); + assertEquals(-1, op2.getExpiration()); + assertEquals(-1, op2.getDefault()); + assertEquals(by, op2.getBy()); + assertSame(Mutator.decr, op2.getType()); + assertCallback(op2); + } + +} diff --git a/src/test/java/net/spy/memcached/protocol/binary/OperationFactoryTest.java b/src/test/java/net/spy/memcached/protocol/binary/OperationFactoryTest.java new file mode 100644 index 000000000..fdb7eab10 --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/binary/OperationFactoryTest.java @@ -0,0 
+1,13 @@ +package net.spy.memcached.protocol.binary; + +import net.spy.memcached.OperationFactory; +import net.spy.memcached.OperationFactoryTestBase; + +public class OperationFactoryTest extends OperationFactoryTestBase { + + @Override + protected OperationFactory getOperationFactory() { + return new BinaryOperationFactory(); + } + +} diff --git a/src/test/java/net/spy/memcached/protocol/binary/OperatonTest.java b/src/test/java/net/spy/memcached/protocol/binary/OperatonTest.java new file mode 100644 index 000000000..a3e5a5e6b --- /dev/null +++ b/src/test/java/net/spy/memcached/protocol/binary/OperatonTest.java @@ -0,0 +1,38 @@ +package net.spy.memcached.protocol.binary; + +import static net.spy.memcached.protocol.binary.OperationImpl.decodeInt; +import static net.spy.memcached.protocol.binary.OperationImpl.decodeUnsignedInt; +import junit.framework.TestCase; + +/** + * Test operation stuff. + */ +public class OperatonTest extends TestCase { + + public void testIntegerDecode() { + assertEquals(129, + decodeInt(new byte[]{0, 0, 0, (byte)0x81}, 0)); + assertEquals(129 * 256, + decodeInt(new byte[]{0, 0, (byte)0x81, 0}, 0)); + assertEquals(129 * 256 * 256, + decodeInt(new byte[]{0, (byte)0x81, 0, 0}, 0)); + assertEquals(129 * 256 * 256 * 256, + decodeInt(new byte[]{(byte)0x81, 0, 0, 0}, 0)); + } + + public void testUnsignedIntegerDecode() { + assertEquals(129, + decodeUnsignedInt(new byte[]{0, 0, 0, (byte)0x81}, 0)); + assertEquals(129 * 256, + decodeUnsignedInt(new byte[]{0, 0, (byte)0x81, 0}, 0)); + assertEquals(129 * 256 * 256, + decodeUnsignedInt(new byte[]{0, (byte)0x81, 0, 0}, 0)); + assertEquals(129L * 256L * 256L * 256L, + decodeUnsignedInt(new byte[]{(byte)0x81, 0, 0, 0}, 0)); + } + + public void testOperationStatusString() { + String s=String.valueOf(OperationImpl.STATUS_OK); + assertEquals("{OperationStatus success=true: OK}", s); + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/BaseSerializingTranscoderTest.java 
b/src/test/java/net/spy/memcached/transcoders/BaseSerializingTranscoderTest.java new file mode 100644 index 000000000..386018c0b --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/BaseSerializingTranscoderTest.java @@ -0,0 +1,150 @@ +package net.spy.memcached.transcoders; + +import java.io.UnsupportedEncodingException; + +import junit.framework.TestCase; +import net.spy.memcached.CachedData; + +/** + * Base tests of the base serializing transcoder stuff. + */ +public class BaseSerializingTranscoderTest extends TestCase { + + private Exposer ex; + + @Override + protected void setUp() throws Exception { + super.setUp(); + ex=new Exposer(); + } + + public void testValidCharacterSet() { + ex.setCharset("KOI8"); + } + + public void testInvalidCharacterSet() { + try { + ex.setCharset("Dustin's Kick Ass Character Set"); + } catch(RuntimeException e) { + assertTrue(e.getCause() instanceof UnsupportedEncodingException); + } + } + + public void testCompressNull() { + try { + ex.compress(null); + fail("Expected an assertion error"); + } catch(NullPointerException e) { + // pass + } + } + + public void testDecodeStringNull() { + assertNull(ex.decodeString(null)); + } + + public void testDeserializeNull() { + assertNull(ex.deserialize(null)); + } + + public void testEncodeStringNull() { + try { + ex.encodeString(null); + fail("Expected an assertion error"); + } catch(NullPointerException e) { + // pass + } + } + + public void testSerializeNull() { + try { + ex.serialize(null); + fail("Expected an assertion error"); + } catch(NullPointerException e) { + // pass + } + } + + public void testDecompressNull() { + assertNull(ex.decompress(null)); + } + + public void testUndeserializable() throws Exception { + byte[] data={ + -84, -19, 0, 5, 115, 114, 0, 4, 84, 101, 115, 116, 2, 61, 102, + -87, -28, 17, 52, 30, 2, 0, 1, 73, 0, 9, 115, 111, 109, 101, + 116, 104, 105, 110, 103, 120, 112, 0, 0, 0, 5 + }; + assertNull(ex.deserialize(data)); + } + + public void 
testDeserializable() throws Exception { + byte[] data={-84, -19, 0, 5, 116, 0, 5, 104, 101, 108, 108, 111}; + assertEquals("hello", ex.deserialize(data)); + } + + public void testBadCharsetDecode() { + ex.overrideCharsetSet("Some Crap"); + try { + ex.encodeString("Woo!"); + fail("Expected runtime exception"); + } catch(RuntimeException e) { + assertSame(UnsupportedEncodingException.class, + e.getCause().getClass()); + } + } + + public void testBadCharsetEncode() { + ex.overrideCharsetSet("Some Crap"); + try { + ex.decodeString("Woo!".getBytes()); + fail("Expected runtime exception"); + } catch(RuntimeException e) { + assertSame(UnsupportedEncodingException.class, + e.getCause().getClass()); + } + } + + // Expose the protected methods so I can test them. + static class Exposer extends BaseSerializingTranscoder { + + public Exposer() { + super(CachedData.MAX_SIZE); + } + + public void overrideCharsetSet(String to) { + charset=to; + } + + @Override + public byte[] compress(byte[] in) { + return super.compress(in); + } + + @Override + public String decodeString(byte[] data) { + return super.decodeString(data); + } + + @Override + public byte[] decompress(byte[] in) { + return super.decompress(in); + } + + @Override + public Object deserialize(byte[] in) { + return super.deserialize(in); + } + + @Override + public byte[] encodeString(String in) { + return super.encodeString(in); + } + + @Override + public byte[] serialize(Object o) { + return super.serialize(o); + } + + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/BaseTranscoderCase.java b/src/test/java/net/spy/memcached/transcoders/BaseTranscoderCase.java new file mode 100644 index 000000000..2ce5e551f --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/BaseTranscoderCase.java @@ -0,0 +1,181 @@ +package net.spy.memcached.transcoders; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Date; + +import net.spy.memcached.CachedData; +import 
net.spy.memcached.compat.BaseMockCase; + +/** + * Basic behavior validation for all transcoders that work with objects. + */ +public abstract class BaseTranscoderCase extends BaseMockCase { + + private Transcoder tc; + + protected void setTranscoder(Transcoder t) { + assert t != null; + tc=t; + } + + protected Transcoder getTranscoder() { + return tc; + } + + public void testSomethingBigger() throws Exception { + Collection dates=new ArrayList(); + for(int i=0; i<1024; i++) { + dates.add(new Date()); + } + CachedData d=tc.encode(dates); + assertEquals(dates, tc.decode(d)); + } + + public void testDate() throws Exception { + Date d=new Date(); + CachedData cd=tc.encode(d); + assertEquals(d, tc.decode(cd)); + } + + public void testLong() throws Exception { + assertEquals(923L, tc.decode(tc.encode(923L))); + } + + public void testInt() throws Exception { + assertEquals(923, tc.decode(tc.encode(923))); + } + + public void testShort() throws Exception { + assertEquals((short)923, tc.decode(tc.encode((short)923))); + } + + public void testChar() throws Exception { + assertEquals('c', tc.decode(tc.encode('c'))); + } + + public void testBoolean() throws Exception { + assertSame(Boolean.TRUE, tc.decode(tc.encode(true))); + assertSame(Boolean.FALSE, tc.decode(tc.encode(false))); + } + + public void testByte() throws Exception { + assertEquals((byte)-127, tc.decode(tc.encode((byte)-127))); + } + + public void testStringBuilder() throws Exception { + StringBuilder sb=new StringBuilder("test"); + StringBuilder sb2=(StringBuilder)tc.decode(tc.encode(sb)); + assertEquals(sb.toString(), sb2.toString()); + } + + public void testStringBuffer() throws Exception { + StringBuffer sb=new StringBuffer("test"); + StringBuffer sb2=(StringBuffer)tc.decode(tc.encode(sb)); + assertEquals(sb.toString(), sb2.toString()); + } + + + private void assertFloat(float f) { + assertEquals(f, tc.decode(tc.encode(f))); + } + + public void testFloat() throws Exception { + assertFloat(0f); + 
assertFloat(Float.MIN_VALUE); + assertFloat(Float.MAX_VALUE); + assertFloat(3.14f); + assertFloat(-3.14f); + assertFloat(Float.NaN); + assertFloat(Float.POSITIVE_INFINITY); + assertFloat(Float.NEGATIVE_INFINITY); + } + + private void assertDouble(double d) { + assertEquals(d, tc.decode(tc.encode(d))); + } + + public void testDouble() throws Exception { + assertDouble(0d); + assertDouble(Double.MIN_VALUE); + assertDouble(Double.MAX_VALUE); + assertDouble(3.14d); + assertDouble(-3.14d); + assertDouble(Double.NaN); + assertDouble(Double.POSITIVE_INFINITY); + assertDouble(Double.NEGATIVE_INFINITY); + } + + private void assertLong(long l) { + CachedData encoded=tc.encode(l); + long decoded=(Long)tc.decode(encoded); + assertEquals(l, decoded); + } + + /* + private void displayBytes(long l, byte[] encoded) { + System.out.print(l + " ["); + for(byte b : encoded) { + System.out.print((b<0?256+b:b) + " "); + } + System.out.println("]"); + } + */ + + public void testLongEncoding() throws Exception { + assertLong(Long.MIN_VALUE); + assertLong(1); + assertLong(23852); + assertLong(0L); + assertLong(-1); + assertLong(-23835); + assertLong(Long.MAX_VALUE); + } + + private void assertInt(int i) { + CachedData encoded=tc.encode(i); + int decoded=(Integer)tc.decode(encoded); + assertEquals(i, decoded); + } + + public void testIntEncoding() throws Exception { + assertInt(Integer.MIN_VALUE); + assertInt(83526); + assertInt(1); + assertInt(0); + assertInt(-1); + assertInt(-238526); + assertInt(Integer.MAX_VALUE); + } + + public void testBooleanEncoding() throws Exception { + assertTrue((Boolean)tc.decode(tc.encode(true))); + assertFalse((Boolean)tc.decode(tc.encode(false))); + } + + public void testByteArray() throws Exception { + byte[] a={'a', 'b', 'c'}; + CachedData cd=tc.encode(a); + assertTrue(Arrays.equals(a, cd.getData())); + assertTrue(Arrays.equals(a, (byte[])tc.decode(cd))); + } + + public void testStrings() throws Exception { + String s1="This is a simple test string."; + 
CachedData cd=tc.encode(s1); + assertEquals(getStringFlags(), cd.getFlags()); + assertEquals(s1, tc.decode(cd)); + } + + public void testUTF8String() throws Exception { + String s1="\u2013\u00f3\u2013\u00a5\u2014\u00c4\u2013\u221e\u2013" + + "\u2264\u2014\u00c5\u2014\u00c7\u2013\u2264\u2014\u00c9\u2013" + + "\u03c0, \u2013\u00ba\u2013\u220f\u2014\u00c4."; + CachedData cd=tc.encode(s1); + assertEquals(getStringFlags(), cd.getFlags()); + assertEquals(s1, tc.decode(cd)); + } + + protected abstract int getStringFlags(); +} diff --git a/src/test/java/net/spy/memcached/transcoders/CachedDataTest.java b/src/test/java/net/spy/memcached/transcoders/CachedDataTest.java new file mode 100644 index 000000000..4b0eec877 --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/CachedDataTest.java @@ -0,0 +1,21 @@ +package net.spy.memcached.transcoders; + +import junit.framework.TestCase; +import net.spy.memcached.CachedData; + +/** + * Test a couple aspects of CachedData. + */ +public class CachedDataTest extends TestCase { + + public void testToString() throws Exception { + String exp="{CachedData flags=13 data=[84, 104, 105, 115, 32, 105, " + + "115, 32, 97, 32, 115, 105, 109, 112, 108, 101, 32, 116, 101, " + + "115, 116, 32, 115, 116, 114, 105, 110, 103, 46]}"; + CachedData cd=new CachedData(13, + "This is a simple test string.".getBytes("UTF-8"), + CachedData.MAX_SIZE); + assertEquals(exp, String.valueOf(cd)); + } + +} diff --git a/src/test/java/net/spy/memcached/transcoders/IntegerTranscoderTest.java b/src/test/java/net/spy/memcached/transcoders/IntegerTranscoderTest.java new file mode 100644 index 000000000..28210b8df --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/IntegerTranscoderTest.java @@ -0,0 +1,28 @@ +package net.spy.memcached.transcoders; + +import junit.framework.TestCase; +import net.spy.memcached.CachedData; + +/** + * Test the integer transcoder. 
+ */ +public class IntegerTranscoderTest extends TestCase { + + private IntegerTranscoder tc=null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + tc=new IntegerTranscoder(); + } + + public void testInt() throws Exception { + assertEquals(923, tc.decode(tc.encode(923)).intValue()); + } + + public void testBadFlags() throws Exception { + CachedData cd=tc.encode(9284); + assertNull(tc.decode(new CachedData(cd.getFlags()+1, cd.getData(), + CachedData.MAX_SIZE))); + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/LongTranscoderTest.java b/src/test/java/net/spy/memcached/transcoders/LongTranscoderTest.java new file mode 100644 index 000000000..a2aef923a --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/LongTranscoderTest.java @@ -0,0 +1,28 @@ +package net.spy.memcached.transcoders; + +import junit.framework.TestCase; +import net.spy.memcached.CachedData; + +/** + * Test the long transcoder. + */ +public class LongTranscoderTest extends TestCase { + + private LongTranscoder tc=null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + tc=new LongTranscoder(); + } + + public void testLong() throws Exception { + assertEquals(923, tc.decode(tc.encode(923L)).longValue()); + } + + public void testBadFlags() throws Exception { + CachedData cd=tc.encode(9284L); + assertNull(tc.decode(new CachedData(cd.getFlags()+1, cd.getData(), + CachedData.MAX_SIZE))); + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/SerializingTranscoderTest.java b/src/test/java/net/spy/memcached/transcoders/SerializingTranscoderTest.java new file mode 100644 index 000000000..2ffe5b75c --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/SerializingTranscoderTest.java @@ -0,0 +1,112 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import java.util.Arrays; +import java.util.Calendar; + +import net.spy.memcached.CachedData; + +/** + * Test the serializing transcoder. 
+ */ +public class SerializingTranscoderTest extends BaseTranscoderCase { + + private SerializingTranscoder tc; + private TranscoderUtils tu; + + @Override + protected void setUp() throws Exception { + super.setUp(); + tc=new SerializingTranscoder(); + setTranscoder(tc); + tu=new TranscoderUtils(true); + } + + public void testNonserializable() throws Exception { + try { + tc.encode(new Object()); + fail("Processed a non-serializable object."); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testCompressedStringNotSmaller() throws Exception { + String s1="This is a test simple string that will not be compressed."; + // Reduce the compression threshold so it'll attempt to compress it. + tc.setCompressionThreshold(8); + CachedData cd=tc.encode(s1); + // This should *not* be compressed because it is too small + assertEquals(0, cd.getFlags()); + assertTrue(Arrays.equals(s1.getBytes(), cd.getData())); + assertEquals(s1, tc.decode(cd)); + } + + public void testCompressedString() throws Exception { + // This one will actually compress + String s1="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + tc.setCompressionThreshold(8); + CachedData cd=tc.encode(s1); + assertEquals(SerializingTranscoder.COMPRESSED, cd.getFlags()); + assertFalse(Arrays.equals(s1.getBytes(), cd.getData())); + assertEquals(s1, tc.decode(cd)); + } + + public void testObject() throws Exception { + Calendar c=Calendar.getInstance(); + CachedData cd=tc.encode(c); + assertEquals(SerializingTranscoder.SERIALIZED, cd.getFlags()); + assertEquals(c, tc.decode(cd)); + } + + public void testCompressedObject() throws Exception { + tc.setCompressionThreshold(8); + Calendar c=Calendar.getInstance(); + CachedData cd=tc.encode(c); + assertEquals(SerializingTranscoder.SERIALIZED + |SerializingTranscoder.COMPRESSED, cd.getFlags()); + assertEquals(c, tc.decode(cd)); + } + + public void testUnencodeable() throws Exception { + try { + CachedData cd=tc.encode(new Object()); + fail("Should 
fail to serialize, got" + cd); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testUndecodeable() throws Exception { + CachedData cd=new CachedData( + Integer.MAX_VALUE & + ~(SerializingTranscoder.COMPRESSED + | SerializingTranscoder.SERIALIZED), + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + assertNull(tc.decode(cd)); + } + + public void testUndecodeableSerialized() throws Exception { + CachedData cd=new CachedData(SerializingTranscoder.SERIALIZED, + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + assertNull(tc.decode(cd)); + } + + public void testUndecodeableCompressed() throws Exception { + CachedData cd=new CachedData( + SerializingTranscoder.COMPRESSED, + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + System.out.println("got " + tc.decode(cd)); + assertNull(tc.decode(cd)); + } + + @Override + protected int getStringFlags() { + return 0; + } + +} diff --git a/src/test/java/net/spy/memcached/transcoders/TranscodeServiceTest.java b/src/test/java/net/spy/memcached/transcoders/TranscodeServiceTest.java new file mode 100644 index 000000000..b520c7f77 --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/TranscodeServiceTest.java @@ -0,0 +1,59 @@ +package net.spy.memcached.transcoders; + +import java.util.concurrent.Future; + +import junit.framework.TestCase; +import net.spy.memcached.CachedData; + +/** + * Test the transcode service. 
+ */ +public class TranscodeServiceTest extends TestCase { + + private TranscodeService ts = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + ts = new TranscodeService(false); + } + + @Override + protected void tearDown() throws Exception { + ts.shutdown(); + assertTrue(ts.isShutdown()); + super.tearDown(); + } + + public void testNonExecuting() throws Exception { + CachedData cd = new CachedData(0, new byte[0], 0); + Future fs = ts.decode(new TestTranscoder(), cd); + assertEquals("Stuff!", fs.get()); + } + + public void testExecuting() throws Exception { + CachedData cd = new CachedData(1, new byte[0], 0); + Future fs = ts.decode(new TestTranscoder(), cd); + assertEquals("Stuff!", fs.get()); + } + + private static final class TestTranscoder implements Transcoder { + + public boolean asyncDecode(CachedData d) { + return d.getFlags() == 1; + } + + public String decode(CachedData d) { + return "Stuff!"; + } + + public CachedData encode(String o) { + throw new RuntimeException("Not invoked."); + } + + public int getMaxSize() { + return 5; + } + + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/TranscoderUtilsTest.java b/src/test/java/net/spy/memcached/transcoders/TranscoderUtilsTest.java new file mode 100644 index 000000000..9b6964df0 --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/TranscoderUtilsTest.java @@ -0,0 +1,65 @@ +package net.spy.memcached.transcoders; + +import java.util.Arrays; + +import junit.framework.TestCase; + +/** + * Some test coverage for transcoder utils. 
+ */ +public class TranscoderUtilsTest extends TestCase { + + private TranscoderUtils tu; + byte[] oversizeBytes=new byte[16]; + + @Override + protected void setUp() throws Exception { + super.setUp(); + tu=new TranscoderUtils(true); + } + + public void testBooleanOverflow() { + try { + boolean b=tu.decodeBoolean(oversizeBytes); + fail("Got " + b + " expected assertion."); + } catch(AssertionError e) { + // pass + } + } + + public void testByteOverflow() { + try { + byte b=tu.decodeByte(oversizeBytes); + fail("Got " + b + " expected assertion."); + } catch(AssertionError e) { + // pass + } + } + + public void testIntOverflow() { + try { + int b=tu.decodeInt(oversizeBytes); + fail("Got " + b + " expected assertion."); + } catch(AssertionError e) { + // pass + } + } + + public void testLongOverflow() { + try { + long b=tu.decodeLong(oversizeBytes); + fail("Got " + b + " expected assertion."); + } catch(AssertionError e) { + // pass + } + } + + public void testPackedLong() { + assertEquals("[1]", Arrays.toString(tu.encodeLong(1))); + } + + public void testUnpackedLong() { + assertEquals("[0, 0, 0, 0, 0, 0, 0, 1]", + Arrays.toString(new TranscoderUtils(false).encodeLong(1))); + } +} diff --git a/src/test/java/net/spy/memcached/transcoders/WhalinTranscoderTest.java b/src/test/java/net/spy/memcached/transcoders/WhalinTranscoderTest.java new file mode 100644 index 000000000..d3ea3bb4c --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/WhalinTranscoderTest.java @@ -0,0 +1,111 @@ +// Copyright (c) 2006 Dustin Sallings + +package net.spy.memcached.transcoders; + +import java.util.Arrays; +import java.util.Calendar; + +import net.spy.memcached.CachedData; + +/** + * Test the serializing transcoder. 
+ */ +public class WhalinTranscoderTest extends BaseTranscoderCase { + + private WhalinTranscoder tc; + private TranscoderUtils tu; + + @Override + protected void setUp() throws Exception { + super.setUp(); + tc=new WhalinTranscoder(); + setTranscoder(tc); + tu=new TranscoderUtils(false); + } + + public void testNonserializable() throws Exception { + try { + tc.encode(new Object()); + fail("Processed a non-serializable object."); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testCompressedStringNotSmaller() throws Exception { + String s1="This is a test simple string that will not be compressed."; + // Reduce the compression threshold so it'll attempt to compress it. + tc.setCompressionThreshold(8); + CachedData cd=tc.encode(s1); + // This should *not* be compressed because it is too small + assertEquals(WhalinTranscoder.SPECIAL_STRING, cd.getFlags()); + assertTrue(Arrays.equals(s1.getBytes(), cd.getData())); + assertEquals(s1, tc.decode(cd)); + } + + public void testCompressedString() throws Exception { + // This one will actually compress + String s1="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; + tc.setCompressionThreshold(8); + CachedData cd=tc.encode(s1); + assertEquals( + WhalinTranscoder.COMPRESSED | WhalinTranscoder.SPECIAL_STRING, + cd.getFlags()); + assertFalse(Arrays.equals(s1.getBytes(), cd.getData())); + assertEquals(s1, tc.decode(cd)); + } + + public void testObject() throws Exception { + Calendar c=Calendar.getInstance(); + CachedData cd=tc.encode(c); + assertEquals(WhalinTranscoder.SERIALIZED, cd.getFlags()); + assertEquals(c, tc.decode(cd)); + } + + public void testCompressedObject() throws Exception { + tc.setCompressionThreshold(8); + Calendar c=Calendar.getInstance(); + CachedData cd=tc.encode(c); + assertEquals(WhalinTranscoder.SERIALIZED + |WhalinTranscoder.COMPRESSED, cd.getFlags()); + assertEquals(c, tc.decode(cd)); + } + + public void testUnencodeable() throws Exception { + try { + CachedData 
cd=tc.encode(new Object()); + fail("Should fail to serialize, got" + cd); + } catch(IllegalArgumentException e) { + // pass + } + } + + public void testUndecodeable() throws Exception { + CachedData cd=new CachedData( + Integer.MAX_VALUE & + ~(WhalinTranscoder.COMPRESSED | WhalinTranscoder.SERIALIZED), + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + assertNull(tc.decode(cd)); + } + + public void testUndecodeableSerialized() throws Exception { + CachedData cd=new CachedData(WhalinTranscoder.SERIALIZED, + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + assertNull(tc.decode(cd)); + } + + public void testUndecodeableCompressed() throws Exception { + CachedData cd=new CachedData(WhalinTranscoder.COMPRESSED, + tu.encodeInt(Integer.MAX_VALUE), + tc.getMaxSize()); + assertNull(tc.decode(cd)); + } + + @Override + protected int getStringFlags() { + return WhalinTranscoder.SPECIAL_STRING; + } + +} diff --git a/src/test/java/net/spy/memcached/transcoders/WhalinV1TranscoderTest.java b/src/test/java/net/spy/memcached/transcoders/WhalinV1TranscoderTest.java new file mode 100644 index 000000000..18df90f68 --- /dev/null +++ b/src/test/java/net/spy/memcached/transcoders/WhalinV1TranscoderTest.java @@ -0,0 +1,31 @@ +package net.spy.memcached.transcoders; + +import java.util.Arrays; + +import net.spy.memcached.CachedData; + +public class WhalinV1TranscoderTest extends BaseTranscoderCase { + + @Override + protected void setUp() throws Exception { + super.setUp(); + setTranscoder(new WhalinV1Transcoder()); + } + + @Override + public void testByteArray() throws Exception { + byte[] a={'a', 'b', 'c'}; + + CachedData cd=getTranscoder().encode(a); + byte[] decoded=(byte[])getTranscoder().decode(cd); + assertNotNull(decoded); + assertTrue(Arrays.equals(a, decoded)); + } + + @Override + protected int getStringFlags() { + // Flags are not used by this transcoder. 
+ return 0; + } + +} diff --git a/src/test/java/net/spy/memcached/util/BTreeUtilTest.java b/src/test/java/net/spy/memcached/util/BTreeUtilTest.java new file mode 100644 index 000000000..ea29293a7 --- /dev/null +++ b/src/test/java/net/spy/memcached/util/BTreeUtilTest.java @@ -0,0 +1,80 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.util; + +import java.util.Arrays; + +import net.spy.memcached.compat.BaseMockCase; + +public class BTreeUtilTest extends BaseMockCase { + + public void testAA() { + + System.out.println(Arrays.toString("".getBytes())); + + } + + public void testFromByteArraysToHex() throws Exception { + byte[] byteArray1 = { 0, 'F', 'C', 0, 0 }; + assertEquals("0x0046430000", BTreeUtil.toHex(byteArray1)); + + byte[] byteArray2 = { 0, 0, 1, 'C' }; + assertEquals("0x00000143", BTreeUtil.toHex(byteArray2)); + + byte[] byteArray3 = { 1, 'C', 'A', 0 }; + assertEquals("0x01434100", BTreeUtil.toHex(byteArray3)); + + byte[] byteArray4 = { 0, 0, 'C', 'A', 0, 0 }; + assertEquals("0x000043410000", BTreeUtil.toHex(byteArray4)); + } + + public void testFromHexToByteArrays() throws Exception { + byte[] byteArray1 = { 0, 'F', 'C', 0, 0 }; + assertTrue(Arrays.equals(byteArray1, + BTreeUtil.hexStringToByteArrays("0x0046430000"))); + + byte[] byteArray2 = { 0, 0, 1, 'C' }; + assertTrue(Arrays.equals(byteArray2, + 
BTreeUtil.hexStringToByteArrays("0x00000143"))); + + byte[] byteArray3 = { 1, 'C', 'A', 0 }; + assertTrue(Arrays.equals(byteArray3, + BTreeUtil.hexStringToByteArrays("0x01434100"))); + + byte[] byteArray4 = { 0, 0, 'C', 'A', 0, 0 }; + assertTrue(Arrays.equals(byteArray4, + BTreeUtil.hexStringToByteArrays("0x000043410000"))); + } + + public void testCompareSameLengthByteArrays() throws Exception { + byte[] array1 = { 0, 0, 1, 0 }; + byte[] array2 = { 0, 0, 0, 0 }; + byte[] array3 = { 0, 0, 1, 0 }; + + assertEquals(1, BTreeUtil.compareByteArraysInLexOrder(array1, array2)); + assertEquals(-1, BTreeUtil.compareByteArraysInLexOrder(array2, array1)); + assertEquals(0, BTreeUtil.compareByteArraysInLexOrder(array1, array3)); + } + + public void testCompareDifferentLengthByteArrays() throws Exception { + byte[] array1 = { 0, 0, 1 }; + byte[] array2 = { 0, 0, 1, 0 }; + + assertEquals(-1, BTreeUtil.compareByteArraysInLexOrder(array1, array2)); + assertEquals(1, BTreeUtil.compareByteArraysInLexOrder(array2, array1)); + } +} diff --git a/src/test/java/net/spy/memcached/util/CacheLoaderTest.java b/src/test/java/net/spy/memcached/util/CacheLoaderTest.java new file mode 100644 index 000000000..9e4bae317 --- /dev/null +++ b/src/test/java/net/spy/memcached/util/CacheLoaderTest.java @@ -0,0 +1,88 @@ +package net.spy.memcached.util; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.MemcachedClientIF; +import net.spy.memcached.compat.BaseMockCase; +import net.spy.memcached.internal.ImmediateFuture; + +import org.jmock.Mock; + +/** + * Test the cache loader. 
+ */ +public class CacheLoaderTest extends BaseMockCase { + + private ExecutorService es = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + BlockingQueue wq = new LinkedBlockingQueue(); + es = new ThreadPoolExecutor(10, 10, 5*60, TimeUnit.SECONDS, wq); + } + + @Override + protected void tearDown() throws Exception { + es.shutdownNow(); + super.tearDown(); + } + + public void testSimpleLoading() throws Exception { + Mock m = mock(MemcachedClientIF.class); + + LoadCounter sl = new LoadCounter(); + CacheLoader cl = new CacheLoader((MemcachedClientIF)m.proxy(), + es, sl, 0); + + m.expects(once()).method("set").with(eq("a"), eq(0), eq(1)) + .will(returnValue(new ImmediateFuture(true))); + m.expects(once()).method("set").with(eq("a"), eq(0), eq(1)) + .will(throwException(new IllegalStateException("Full up"))); + m.expects(once()).method("set").with(eq("b"), eq(0), eq(2)) + .will(returnValue(new ImmediateFuture(new RuntimeException("blah")))); + m.expects(once()).method("set").with(eq("c"), eq(0), eq(3)) + .will(returnValue(new ImmediateFuture(false))); + + Map map = new HashMap(); + map.put("a", 1); + map.put("b", 2); + map.put("c", 3); + + // Load the cache and wait for it to finish. 
+ cl.loadData(map).get(); + es.shutdown(); + es.awaitTermination(1, TimeUnit.SECONDS); + + assertEquals(1, sl.success); + assertEquals(1, sl.exceptions); + assertEquals(1, sl.failure); + } + + static class LoadCounter implements CacheLoader.StorageListener { + + public volatile int exceptions = 0; + public volatile int success = 0; + public volatile int failure = 0; + + public void errorStoring(String k, Exception e) { + exceptions++; + } + + public void storeResult(String k, boolean result) { + if(result) { + success++; + } else { + failure++; + } + } + + } + +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientConnectTest.java b/src/test/manual/net/spy/memcached/ArcusClientConnectTest.java new file mode 100644 index 000000000..c537b7963 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientConnectTest.java @@ -0,0 +1,41 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; + +public class ArcusClientConnectTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // This test assumes we use ZK + openFromZK(); + } + + @Override + protected void tearDown() throws Exception { + // do nothing + } + + public void testOpenAndWait() { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ArcusClient client = ArcusClient.createArcusClient(ZK_HOST, + ZK_SERVICE_ID, cfb); + client.shutdown(); + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientFrontCacheTest.java b/src/test/manual/net/spy/memcached/ArcusClientFrontCacheTest.java new file mode 100644 index 000000000..fb827be69 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientFrontCacheTest.java @@ -0,0 +1,70 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; + +public class ArcusClientFrontCacheTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // This test assumes we use ZK + openFromZK(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testCreateSingleClient() { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + cfb.setFrontCacheExpireTime(10); + cfb.setMaxFrontCacheElements(10); + + ArcusClient.createArcusClient(ZK_HOST, ZK_SERVICE_ID, cfb); + } + + public void testCreatePool() { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + cfb.setFrontCacheExpireTime(10); + cfb.setMaxFrontCacheElements(10); + + ArcusClient.createArcusClientPool(ZK_HOST, ZK_SERVICE_ID, cfb, 4); + } + + public void testKV() { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + cfb.setFrontCacheExpireTime(10); + cfb.setMaxFrontCacheElements(10); + + ArcusClient client = ArcusClient.createArcusClient(ZK_HOST, + ZK_SERVICE_ID, cfb); + + try { + Assert.assertTrue(client.set("test:key", 100, "value").get()); + Assert.assertEquals("value", client.get("test:key")); + + Assert.assertTrue(client.delete("test:key").get()); + + Assert.assertNull(client.get("test:key")); + } catch (Exception e) { + // TODO: handle exception + } + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientNotExistsServiceCodeTest.java b/src/test/manual/net/spy/memcached/ArcusClientNotExistsServiceCodeTest.java new file mode 100644 index 000000000..abd3287f8 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientNotExistsServiceCodeTest.java @@ -0,0 +1,48 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import org.junit.Ignore; + +import net.spy.memcached.collection.BaseIntegrationTest; + +@Ignore +public class ArcusClientNotExistsServiceCodeTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // do nothing + }; + + @Override + protected void tearDown() throws Exception { + // do nothing + }; + + public void testNotExistsServiceCode() { + if (!USE_ZK) + return; + + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + try { + ArcusClient.createArcusClient(ZK_HOST, "NOT_EXISTS_SVC_CODE", cfb); + } catch (NotExistsServiceCodeException e) { + return; + } + fail("not exists service code test failed."); + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientPoolReconnectTest.java b/src/test/manual/net/spy/memcached/ArcusClientPoolReconnectTest.java new file mode 100644 index 000000000..7fa39e336 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientPoolReconnectTest.java @@ -0,0 +1,55 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached; + +import net.spy.memcached.collection.BaseIntegrationTest; + +import org.junit.Ignore; + +@Ignore +public class ArcusClientPoolReconnectTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // do nothing + }; + + @Override + protected void tearDown() throws Exception { + // do nothing + }; + + public void testOpenAndWait() { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ArcusClientPool client = ArcusClient.createArcusClientPool(ZK_HOST, + ZK_SERVICE_ID, cfb, 2); + + try { + Thread.sleep(120000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + client.shutdown(); + + try { + Thread.sleep(Long.MAX_VALUE); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientPoolShutdownTest.java b/src/test/manual/net/spy/memcached/ArcusClientPoolShutdownTest.java new file mode 100644 index 000000000..056af4a21 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientPoolShutdownTest.java @@ -0,0 +1,92 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.List; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; + +import org.junit.Ignore; + +@Ignore +public class ArcusClientPoolShutdownTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // do nothing + } + + @Override + protected void tearDown() throws Exception { + // do nothing + } + + public void testOpenAndWait() { + if (!USE_ZK) { + return; + } + + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ArcusClientPool client = ArcusClient.createArcusClientPool(ZK_HOST, + ZK_SERVICE_ID, cfb, 2); + + // This threads must be stopped after client is shutdown. + List threadNames = new ArrayList(); + threadNames.add("main-EventThread"); + threadNames.add("main-SendThread(" + ZK_HOST + ")"); + threadNames + .add("Cache Manager IO for " + ZK_SERVICE_ID + "@" + ZK_HOST); + + // Check exists threads + List currentThreads = new ArrayList(); + for (Thread t : Thread.getAllStackTraces().keySet()) { + currentThreads.add(t.getName()); + } + for (String name : threadNames) { + Assert.assertTrue(currentThreads.contains(name)); + } + + // Sleep 1s + try { + Thread.sleep(1000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + // Shutdown the client. 
+ client.shutdown(); + + // Sleep 1s + try { + Thread.sleep(1000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + // Check the threads after shutdown the client + currentThreads.clear(); + for (Thread t : Thread.getAllStackTraces().keySet()) { + currentThreads.add(t.getName()); + } + for (String name : threadNames) { + Assert.assertTrue("Thread '" + name + "' is exists.", + !currentThreads.contains(name)); + } + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientReconnectTest.java b/src/test/manual/net/spy/memcached/ArcusClientReconnectTest.java new file mode 100644 index 000000000..53098f694 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientReconnectTest.java @@ -0,0 +1,59 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import org.junit.Ignore; + +import net.spy.memcached.collection.BaseIntegrationTest; + +@Ignore +public class ArcusClientReconnectTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + // do nothing + } + + @Override + protected void tearDown() throws Exception { + // do nothing + } + + public void testOpenAndWait() { + if (!USE_ZK) { + return; + } + + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ArcusClient client = ArcusClient.createArcusClient(ZK_HOST, + ZK_SERVICE_ID, cfb); + + try { + Thread.sleep(120000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + client.shutdown(); + + try { + Thread.sleep(Long.MAX_VALUE); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } +} diff --git a/src/test/manual/net/spy/memcached/ArcusClientShutdownTest.java b/src/test/manual/net/spy/memcached/ArcusClientShutdownTest.java new file mode 100644 index 000000000..9755a0c86 --- /dev/null +++ b/src/test/manual/net/spy/memcached/ArcusClientShutdownTest.java @@ -0,0 +1,90 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached; + +import java.util.ArrayList; +import java.util.List; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; + +import org.junit.Ignore; + +@Ignore +public class ArcusClientShutdownTest extends BaseIntegrationTest { + + @Override + protected void setUp() throws Exception { + } + + @Override + protected void tearDown() throws Exception { + } + + public void testOpenAndWait() { + if (!USE_ZK) { + return; + } + + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + ArcusClient client = ArcusClient.createArcusClient(ZK_HOST, + ZK_SERVICE_ID, cfb); + + // This threads must be stopped after client is shutdown. + List threadNames = new ArrayList(); + threadNames.add("main-EventThread"); + threadNames.add("main-SendThread(" + ZK_HOST + ")"); + threadNames + .add("Cache Manager IO for " + ZK_SERVICE_ID + "@" + ZK_HOST); + + // Check exists threads + List currentThreads = new ArrayList(); + for (Thread t : Thread.getAllStackTraces().keySet()) { + currentThreads.add(t.getName()); + } + for (String name : threadNames) { + Assert.assertTrue(currentThreads.contains(name)); + } + + // Sleep 1s + try { + Thread.sleep(1000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + // Shutdown the client. 
+ client.shutdown(); + + // Sleep 1s + try { + Thread.sleep(1000L); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + // Check the threads after shutdown the client + currentThreads.clear(); + for (Thread t : Thread.getAllStackTraces().keySet()) { + currentThreads.add(t.getName()); + } + for (String name : threadNames) { + Assert.assertTrue("Thread '" + name + "' is exists.", + !currentThreads.contains(name)); + } + } +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetErrorTest.java b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetErrorTest.java new file mode 100644 index 000000000..c0950f6bb --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetErrorTest.java @@ -0,0 +1,272 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class ByteArrayBKeySMGetErrorTest extends BaseIntegrationTest { + + private static final List KEY_LIST = new ArrayList(); + + static { + String KEY = ByteArrayBKeySMGetErrorTest.class.getSimpleName() + + new Random().nextLong(); + for (int i = 1; i <= 10; i++) + KEY_LIST.add(KEY + (i * 9)); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + for (String KEY : KEY_LIST) { + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + } + + @Override + protected void tearDown() throws Exception { + for (String KEY : KEY_LIST) { + mc.delete(KEY).get(); + } + super.tearDown(); + } + + public void testDuplicated() { + // insert test data + try { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), + new byte[] { (byte) 1 }, null, "VALUE", + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), + new byte[] { (byte) 1 }, null, "VALUE", + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), + new byte[] { (byte) 2 }, null, "VALUE", + new CollectionAttributes()).get()); + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, new byte[] { (byte) 0 }, + new byte[] { (byte) 10 }, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + 
+ Assert.assertEquals(3, map.size()); + + Assert.assertEquals("DUPLICATED", future.getOperationStatus() + .getMessage()); + + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBkeyMismatch() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(20); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 20; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + + // not trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), i, null, + "VALUE", new CollectionAttributes()).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, new byte[] { (byte) 0 }, + new byte[] { (byte) 15 }, + ElementFlagFilter.DO_NOT_FILTER, 0, 20); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(0, map.size()); + Assert.assertEquals("BKEY_MISMATCH", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTrimmed() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(10); + attr.setOverflowAction(CollectionOverflowAction.smallest_trim); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + mc.asyncBopCreate(KEY_LIST.get(1), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 30; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + } + + // not trimmed + for (int i = 0; i < 9; i++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + } + } catch 
(Exception e) { + fail(e.getMessage()); + } + + // sort merge get + byte[] from = new byte[] { (byte) 20 }; + byte[] to = new byte[] { (byte) 10 }; + long count = 100; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, (int) count); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(1, map.size()); + Assert.assertEquals("TRIMMED", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testOutOfRange() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(10); + attr.setOverflowAction(CollectionOverflowAction.smallest_trim); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + mc.asyncBopCreate(KEY_LIST.get(1), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 30; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + } + + // not trimmed + for (int i = 0; i < 9; i++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + byte[] from = new byte[] { (byte) 10 }; + byte[] to = new byte[] { (byte) 0 }; + long count = 100; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, (int) count); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(0, map.size()); + Assert.assertEquals("OUT_OF_RANGE", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDuplicated2() { + // insert test data + try { + 
Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), 1, null, + "VALUE", new CollectionAttributes()).get()); + + for (int bkey = 0; bkey < KEY_LIST.size() - 1; bkey++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(bkey), bkey, + null, "VALUE", new CollectionAttributes()).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertEquals("DUPLICATED", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetIrregularEflagTest.java b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetIrregularEflagTest.java new file mode 100644 index 000000000..d8d9d3680 --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetIrregularEflagTest.java @@ -0,0 +1,76 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.SMGetElement; + +public class ByteArrayBKeySMGetIrregularEflagTest extends BaseIntegrationTest { + + private final String key1 = "ByteArrayBKeySMGetIrregularEflagTest1" + + (Math.abs(new Random().nextInt(99)) + 100); + private final String key2 = "ByteArrayBKeySMGetIrregularEflagTest2" + + (Math.abs(new Random().nextInt(99)) + 100); + + private final byte[] eFlag = { 1 }; + + private final Object value = "valvalvalvalvalvalvalvalvalval"; + + public void testGetAll_1() { + try { + mc.delete(key1).get(); + mc.delete(key2).get(); + + mc.asyncBopInsert(key1, new byte[] { 0 }, eFlag, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key1, new byte[] { 3 }, eFlag, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key1, new byte[] { 2 }, eFlag, value + "2", + new CollectionAttributes()).get(); + + mc.asyncBopInsert(key2, new byte[] { 1 }, eFlag, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key2, new byte[] { 5 }, null, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key2, new byte[] { 4 }, eFlag, value + "2", + new CollectionAttributes()).get(); + + List> list = mc.asyncBopSortMergeGet( + new ArrayList() { + { + add(key1); + add(key2); + } + }, new byte[] { 0 }, new byte[] { 10 }, + ElementFlagFilter.DO_NOT_FILTER, 0, 10).get(); + + for (int i = 0; i < list.size(); i++) { + System.out.println(list.get(i)); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetTest.java 
b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetTest.java new file mode 100644 index 000000000..14354e8fc --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/ByteArrayBKeySMGetTest.java @@ -0,0 +1,482 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class ByteArrayBKeySMGetTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + List keyList = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + super.tearDown(); + } + + public void testSMGetMissAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; 
i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 1 }; + byte[] to = new byte[] { (byte) 2 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertTrue(map.isEmpty()); + Assert.assertEquals(future.getMissedKeyList().toString(), 10, + future.getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + i, map.get(i).getKey()); + Assert.assertTrue(Arrays.equals(new byte[] { (byte) i }, map + .get(i).getByteBkey())); + Assert.assertEquals("VALUE" + i, map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetMoreCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, 
new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertTrue(Arrays.equals(new byte[] { (byte) (i + 1) }, + map.get(i).getByteBkey())); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetExactCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(9, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertTrue(Arrays.equals(new byte[] { (byte) (i + 1) }, + map.get(i).getByteBkey())); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void 
testSMGetHitAllWithOffsetLessThanCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 9; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 9; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(8, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertTrue(Arrays.equals(new byte[] { (byte) (i + 1) }, + map.get(i).getByteBkey())); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalf() { + try { + keyList = 
new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalfDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 10 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTimeout() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 500; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch 
(Exception e) { + fail(e.getMessage()); + } + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 1000 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1L, + TimeUnit.MILLISECONDS); + + fail("Timeout is not tested."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + fail(e.getMessage()); + } + fail("There's no timeout."); + } + + public void testPerformanceGet1000KeysWithoutOffset() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 1000; i++) { + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 1000 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1000L, + TimeUnit.MILLISECONDS); + + // System.out.println("elapsed 1 " + // + (System.currentTimeMillis() - start) + "ms"); + // System.out.println("result size=" + map.size()); + } catch (TimeoutException e) { + future.cancel(true); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + } + + public void testSMGetWithMassiveKeys() { + int testSize = 100; + + try { + keyList = new ArrayList(); + for (int i = 0; i < testSize; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < testSize; i++) { + if (i % 2 == 0) { + continue; + } + mc.asyncBopInsert(KEY + i, new byte[] { (byte) i }, null, + "VALUE" + i, new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = 
System.currentTimeMillis(); + + byte[] from = new byte[] { (byte) 0 }; + byte[] to = new byte[] { (byte) 100 }; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertEquals(50, map.size()); + + List missed = future.getMissedKeyList(); + Assert.assertEquals(testSize / 2, missed.size()); + } catch (Exception e) { + future.cancel(true); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/SMGetErrorTest.java b/src/test/manual/net/spy/memcached/btreesmget/SMGetErrorTest.java new file mode 100644 index 000000000..8479a570c --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/SMGetErrorTest.java @@ -0,0 +1,359 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class SMGetErrorTest extends BaseIntegrationTest { + + private static final List KEY_LIST = new ArrayList(); + + static { + String KEY = SMGetErrorTest.class.getSimpleName() + + new Random().nextLong(); + for (int i = 1; i <= 10; i++) + KEY_LIST.add(KEY + (i * 9)); + } + + @Override + protected void setUp() throws Exception { + super.setUp(); + for (String KEY : KEY_LIST) { + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + } + + @Override + protected void tearDown() throws Exception { + for (String KEY : KEY_LIST) { + mc.delete(KEY).get(); + } + super.tearDown(); + } + + public void testDuplicated() { + // insert test data + try { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), 1, null, + "VALUE", new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), 1, null, + "VALUE", new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), 2, null, + "VALUE", new CollectionAttributes()).get()); + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(3, map.size()); + + Assert.assertEquals("DUPLICATED", 
future.getOperationStatus() + .getMessage()); + + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBkeyMismatch() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(20); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 20; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), + new byte[] { (byte) i }, null, "VALUE", attr).get()); + + // not trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), i, null, + "VALUE", new CollectionAttributes()).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, 0, 15, + ElementFlagFilter.DO_NOT_FILTER, 0, 20); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(0, map.size()); + Assert.assertEquals("BKEY_MISMATCH", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTrimmed() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(10); + attr.setOverflowAction(CollectionOverflowAction.smallest_trim); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + mc.asyncBopCreate(KEY_LIST.get(1), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 30; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), i, null, + "VALUE", attr).get()); + } + + // not trimmed + for (int i = 0; i < 9; i++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), i, null, + "VALUE", attr).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // display current bkey list + try { + Map> map = mc.asyncBopGet(KEY_LIST.get(0), 0, + 10000, 
ElementFlagFilter.DO_NOT_FILTER, 0, 10000, false, + false).get(); + // System.out.println(KEY_LIST.get(0) + " => "); + // for (Entry entry : map.entrySet()) { + // System.out.print(entry.getKey()); + // System.out.print(" , "); + // } + // System.out.println(""); + + map = mc.asyncBopGet(KEY_LIST.get(1), 0, 10000, + ElementFlagFilter.DO_NOT_FILTER, 0, 10000, false, false) + .get(); + // System.out.println(KEY_LIST.get(1) + " => "); + // for (Entry entry : map.entrySet()) { + // System.out.print(entry.getKey()); + // System.out.print(" , "); + // } + // System.out.println(""); + + } catch (Exception e) { + // TODO: handle exception + } + + // sort merge get + long from = 20; + long to = 10; + long count = from - to; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, (int) count); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(1, map.size()); + Assert.assertEquals("TRIMMED", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testOutOfRange() { + // insert test data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(10); + attr.setOverflowAction(CollectionOverflowAction.smallest_trim); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + mc.asyncBopCreate(KEY_LIST.get(1), ElementValueType.STRING, attr) + .get(); + + for (int i = 0; i < 30; i++) { + // trimmed + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), i, null, + "VALUE", attr).get()); + } + + // not trimmed + for (int i = 0; i < 9; i++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(1), i, null, + "VALUE", attr).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // display current bkey list + try { + Map> map = mc.asyncBopGet(KEY_LIST.get(0), 0, + 10000, ElementFlagFilter.DO_NOT_FILTER, 0, 10000, 
false, + false).get(); + // System.out.println(KEY_LIST.get(0) + " => "); + // for (Entry entry : map.entrySet()) { + // System.out.print(entry.getKey()); + // System.out.print(" , "); + // } + // System.out.println(""); + + map = mc.asyncBopGet(KEY_LIST.get(1), 0, 10000, + ElementFlagFilter.DO_NOT_FILTER, 0, 10000, false, false) + .get(); + // System.out.println(KEY_LIST.get(1) + " => "); + // for (Entry entry : map.entrySet()) { + // System.out.print(entry.getKey()); + // System.out.print(" , "); + // } + // System.out.println(""); + + } catch (Exception e) { + // TODO: handle exception + } + + // sort merge get + long from = 10; + long to = 0; + long count = from - to; + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, from, to, + ElementFlagFilter.DO_NOT_FILTER, 0, (int) count); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(0, map.size()); + Assert.assertEquals("OUT_OF_RANGE", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDuplicated2() { + // insert test data + try { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(0), 1, null, + "VALUE", new CollectionAttributes()).get()); + + for (int bkey = 0; bkey < KEY_LIST.size() - 1; bkey++) { + Assert.assertTrue(mc.asyncBopInsert(KEY_LIST.get(bkey), bkey, + null, "VALUE", new CollectionAttributes()).get()); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(KEY_LIST, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertEquals("DUPLICATED", future.getOperationStatus() + .getMessage()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testUnreadable() { + // insert test 
data + try { + CollectionAttributes attr = new CollectionAttributes(); + attr.setReadable(false); + + mc.asyncBopCreate(KEY_LIST.get(0), ElementValueType.STRING, attr) + .get(); + + mc.asyncBopInsert(KEY_LIST.get(0), 0, null, "V", attr).get(); + mc.asyncBopInsert(KEY_LIST.get(0), 1, null, "V", attr).get(); + + mc.asyncBopInsert(KEY_LIST.get(1), 0, null, "V", attr).get(); + mc.asyncBopInsert(KEY_LIST.get(1), 1, null, "V", attr).get(); + } catch (Exception e) { + fail(e.getMessage()); + } + + // sort merge get + SMGetFuture>> future = mc + .asyncBopSortMergeGet(new ArrayList() { + { + add(KEY_LIST.get(0)); + add(KEY_LIST.get(1)); + } + }, 10, 0, ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(0, map.size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/SMGetTest.java b/src/test/manual/net/spy/memcached/btreesmget/SMGetTest.java new file mode 100644 index 000000000..002ada94c --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/SMGetTest.java @@ -0,0 +1,460 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class SMGetTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + List keyList = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + super.tearDown(); + } + + public void testSMGetMissAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 1, 2, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertTrue(map.isEmpty()); + Assert.assertEquals(future.getMissedKeyList().toString(), 10, + future.getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + 
.asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + i, map.get(i).getKey()); + Assert.assertEquals(i, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + i, map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetMoreCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetExactCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 
10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(9, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetLessThanCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 9; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 9; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(8, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + 
Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalf() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalfDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTimeout() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 500; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new 
CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1L, + TimeUnit.MILLISECONDS); + + fail("Timeout is not tested."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + fail("There's no timeout."); + } + + public void testPerformanceGet1000KeysWithoutOffset() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 1000; i++) { + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1000L, + TimeUnit.MILLISECONDS); + + // System.out.println((System.currentTimeMillis() - start) + "ms"); + } catch (TimeoutException e) { + future.cancel(true); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + } + + public void testSMGetWithMassiveKeys() { + int testSize = 2000; + + try { + keyList = new ArrayList(); + for (int i = 0; i < testSize; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < testSize; i++) { + if (i % 2 == 0) { + continue; + } + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 
testSize, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertEquals(500, map.size()); + + List missed = future.getMissedKeyList(); + Assert.assertEquals(testSize / 2, missed.size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetOverflowMaxCount() { + try { + mc.asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 1001); + } catch (IllegalArgumentException e) { + return; + } + Assert.fail("There's no IllegalArgumentException."); + } +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithCombinationEflag.java b/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithCombinationEflag.java new file mode 100644 index 000000000..4b316067d --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithCombinationEflag.java @@ -0,0 +1,488 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class SMGetTestWithCombinationEflag extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + List keyList = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + super.tearDown(); + } + + public void testSMGetMissAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 1, 2, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertTrue(map.isEmpty()); + Assert.assertEquals(future.getMissedKeyList().toString(), 10, + future.getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, 
"VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + i, map.get(i).getKey()); + Assert.assertEquals(i, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + i, map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetMoreCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetExactCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 
10; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(9, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetLessThanCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 9; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 9; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(8, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public 
void testSMGetHitAllDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalf() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalfDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, 
"EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTimeout() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 500; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + else + mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1L, + TimeUnit.MILLISECONDS); + + fail("Timeout is not tested."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + fail("There's no timeout."); + } + + public void testPerformanceGet1000KeysWithoutOffset() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 1000; i++) { + if (i % 2 == 0) + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), + "VALUE" + i, new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + else 
+ mc.asyncBopInsert(KEY + i, i, null, "VALUE" + i, + new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1000L, + TimeUnit.MILLISECONDS); + + // System.out.println((System.currentTimeMillis() - start) + "ms"); + } catch (TimeoutException e) { + future.cancel(true); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + } + + public void testSMGetWithMassiveKeys() { + int testSize = 100; + + try { + keyList = new ArrayList(); + for (int i = 0; i < testSize; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < testSize; i++) { + if (i % 2 == 0) { + continue; + } + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, testSize, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertEquals(50, map.size()); + + List missed = future.getMissedKeyList(); + Assert.assertEquals(testSize / 2, missed.size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithEflag.java b/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithEflag.java new file mode 100644 index 000000000..e21412376 --- /dev/null +++ b/src/test/manual/net/spy/memcached/btreesmget/SMGetTestWithEflag.java @@ -0,0 +1,450 @@ +/* + * arcus-java-client : Arcus 
Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.btreesmget; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.SMGetElement; +import net.spy.memcached.internal.SMGetFuture; + +public class SMGetTestWithEflag extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + List keyList = null; + + @Override + protected void setUp() throws Exception { + super.setUp(); + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + try { + mc.delete(KEY).get(); + } catch (Exception e) { + + } + super.tearDown(); + } + + public void testSMGetMissAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 1, 2, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + 
Assert.assertTrue(map.isEmpty()); + Assert.assertEquals(future.getMissedKeyList().toString(), 10, + future.getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAll() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + i, map.get(i).getKey()); + Assert.assertEquals(i, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + i, map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetMoreCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 50; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 50; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + 
Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetExactCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(9, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllWithOffsetLessThanCount() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 9; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 9; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 1, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(8, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + + for (int i = 0; i < map.size(); i++) { + Assert.assertEquals(KEY + (i + 1), map.get(i).getKey()); + Assert.assertEquals(i + 1, map.get(i).getBkey()); + Assert.assertEquals("VALUE" + (i + 
1), map.get(i).getValue()); + } + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitAllDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < 10; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + Assert.assertEquals(10, map.size()); + Assert.assertTrue(future.getMissedKeyList().isEmpty()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalf() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testSMGetHitHalfDesc() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 10; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 5; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + 
fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + assertEquals(5, map.size()); + + assertEquals(future.getMissedKeyList().toString(), 5, future + .getMissedKeyList().size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testTimeout() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 500; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1L, + TimeUnit.MILLISECONDS); + + fail("Timeout is not tested."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + fail("There's no timeout."); + } + + public void testPerformanceGet1000KeysWithoutOffset() { + try { + keyList = new ArrayList(); + for (int i = 0; i < 1000; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + + for (int i = 0; i < 1000; i++) { + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(1000L, + TimeUnit.MILLISECONDS); + } + } catch (TimeoutException e) { + + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, 1000, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future.get(1000L, + TimeUnit.MILLISECONDS); + + // 
System.out.println((System.currentTimeMillis() - start) + "ms"); + } catch (TimeoutException e) { + future.cancel(true); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + fail(e.getMessage()); + } + } + + public void testSMGetWithMassiveKeys() { + int testSize = 100; + + try { + keyList = new ArrayList(); + for (int i = 0; i < testSize; i++) { + mc.delete(KEY + i).get(); + keyList.add(KEY + i); + } + for (int i = 0; i < testSize; i++) { + if (i % 2 == 0) { + continue; + } + mc.asyncBopInsert(KEY + i, i, "EFLAG".getBytes(), "VALUE" + i, + new CollectionAttributes()).get(); + } + } catch (Exception e) { + fail(e.getMessage()); + } + + long start = System.currentTimeMillis(); + + SMGetFuture>> future = mc + .asyncBopSortMergeGet(keyList, 0, testSize, + ElementFlagFilter.DO_NOT_FILTER, 0, 500); + try { + List> map = future + .get(1000L, TimeUnit.SECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertEquals(50, map.size()); + + List missed = future.getMissedKeyList(); + Assert.assertEquals(testSize / 2, missed.size()); + } catch (Exception e) { + future.cancel(true); + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleBoundaryTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleBoundaryTest.java new file mode 100644 index 000000000..0c0f0ffe8 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleBoundaryTest.java @@ -0,0 +1,71 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BopInsertBulkMultipleBoundaryTest extends BaseIntegrationTest { + + public void testBopGet_Overflow() throws Exception { + String key = "MyBopOverflowtestKey23"; + String value = "MyValue"; + + // delete b+tree + mc.asyncBopDelete(key, 0, 10000, ElementFlagFilter.DO_NOT_FILTER, 0, + true).get(); + + // Create a B+ Tree + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + int maxcount = 10; + + // Set maxcount + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.error); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // generate bkey + Map bkeys = new TreeMap(); + for (int i = 1; i <= maxcount; i++) { + bkeys.put((long) i, value); + } + + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + Assert.assertEquals("Failed count is not 1.", 1, 
errorList.size()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleTest.java new file mode 100644 index 000000000..af5aac2ce --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkMultipleTest.java @@ -0,0 +1,257 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.bulkoperation; + +import java.util.Map; +import java.util.Map.Entry; +import java.util.TreeMap; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BopInsertBulkMultipleTest extends BaseIntegrationTest { + + public void testInsertAndGet() { + String key = "MyBopKey32"; + String value = "MyValue"; + + int bkeySize = 500; + Map bkeys = new TreeMap(); + for (int i = 0; i < bkeySize; i++) { + bkeys.put((long) i, value); + } + + try { + // REMOVE + mc.asyncBopDelete(key, 0, 4000, ElementFlagFilter.DO_NOT_FILTER, 0, + true); + + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (Entry entry : bkeys.entrySet()) { + Future>> f = mc.asyncBopGet(key, + entry.getKey(), ElementFlagFilter.DO_NOT_FILTER, false, + false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Object value2 = map.entrySet().iterator().next().getValue() + .getValue(); + if (!value.equals(value2)) { + errorCount++; + } + } + + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (Entry entry : bkeys.entrySet()) { + mc.asyncBopDelete(key, entry.getKey(), + ElementFlagFilter.DO_NOT_FILTER, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + 
} + } + + public void testTimeout() { + String key = "MyBopKey"; + String value = "MyValue"; + + int bkeySize = mc.getMaxPipedItemCount(); + Map bkeys = new TreeMap(); + for (int i = 0; i < bkeySize; i++) { + bkeys.put((long) i, value); + } + + try { + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, + new CollectionAttributes()); + try { + Map errorList = future.get( + 1L, TimeUnit.NANOSECONDS); + + Assert.assertTrue("Error list is not empty." + errorList, + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + Assert.fail(); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testInsertAndGetUsingSingleClient() { + String key = "MyBopKey333"; + String value = "MyValue"; + + int bkeySize = 500; + Map bkeys = new TreeMap(); + for (int i = 0; i < bkeySize; i++) { + bkeys.put((long) i, value); + } + + try { + // REMOVE + mc.asyncBopDelete(key, 0, 4000, ElementFlagFilter.DO_NOT_FILTER, 0, + true); + + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (Entry entry : bkeys.entrySet()) { + Future>> f = mc.asyncBopGet(key, + entry.getKey(), ElementFlagFilter.DO_NOT_FILTER, false, + false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Object value2 = map.entrySet().iterator().next().getValue() + .getValue(); + if (!value.equals(value2)) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (Entry entry : bkeys.entrySet()) { + mc.asyncBopDelete(key, entry.getKey(), + 
ElementFlagFilter.DO_NOT_FILTER, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeoutUsingSingleClient() { + String key = "MyBopKey"; + String value = "MyValue"; + + int bkeySize = mc.getMaxPipedItemCount(); + Map bkeys = new TreeMap(); + for (int i = 0; i < bkeySize; i++) { + bkeys.put((long) i, value); + } + + try { + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, + new CollectionAttributes()); + try { + Map errorList = future.get( + 1L, TimeUnit.NANOSECONDS); + + Assert.assertTrue("Error list is not empty." + errorList, + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + Assert.fail(); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testErrorCount() { + String key = "MyBopKeyErrorCount"; + String value = "MyValue"; + + int bkeySize = 1200; + Map bkeys = new TreeMap(); + for (int i = 0; i < bkeySize; i++) { + bkeys.put((long) i, value); + } + + try { + System.out.println(11); + mc.delete(key).get(); + + // SET + Future> future = mc + .asyncBopPipedInsertBulk(key, bkeys, null); + + Map map = future.get(2000L, + TimeUnit.MILLISECONDS); + assertEquals(bkeySize, map.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkTest.java new file mode 100644 index 000000000..21d43f5a9 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BopInsertBulkTest.java @@ -0,0 +1,408 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BopInsertBulkTest extends BaseIntegrationTest { + + private static final byte[] EFLAG = new byte[] { 0, 0, 1, 1 }; + + @Override + protected void setUp() throws Exception { + super.setUp(); + } + + public void testInsertAndGet() { + String value = "MyValue"; + long bkey = Long.MAX_VALUE; + + int keySize = 1000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKeyA" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, null, value, + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + Future>> f = mc.asyncBopGet(key, + bkey, 
ElementFlagFilter.DO_NOT_FILTER, false, false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Object value2 = map.entrySet().iterator().next().getValue() + .getValue(); + if (!value.equals(value2)) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncBopDelete(key, bkey, ElementFlagFilter.DO_NOT_FILTER, + true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testInsertAndGetByteArrayBkey() { + String value = "MyValue"; + byte[] bkey = new byte[] { 0, 1, 1, 1 }; + + int keySize = 1000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKeyA" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, null, value, + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + Future>> f = mc.asyncBopGet( + key, bkey, ElementFlagFilter.DO_NOT_FILTER, false, + false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Element value2 = map.entrySet().iterator().next() + .getValue(); + if (!value.equals(value2.getValue())) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncBopDelete(key, bkey, ElementFlagFilter.DO_NOT_FILTER, + true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testInsertAndGetWithEflag() { + String value = 
"MyValue"; + long bkey = Long.MAX_VALUE; + + int keySize = 1000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKeyA" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, EFLAG, + value, new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + Future>> f = mc.asyncBopGet(key, + bkey, ElementFlagFilter.DO_NOT_FILTER, false, false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Object value2 = map.entrySet().iterator().next().getValue() + .getValue(); + if (!value.equals(value2)) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncBopDelete(key, bkey, ElementFlagFilter.DO_NOT_FILTER, + true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testInsertAndGetByteArrayBkeyWithEflag() { + String value = "MyValue"; + byte[] bkey = new byte[] { 0, 1, 1, 1 }; + + int keySize = 1000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKeyA" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, EFLAG, + value, new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + 
int errorCount = 0; + for (String key : keys) { + Future>> f = mc.asyncBopGet( + key, bkey, ElementFlagFilter.DO_NOT_FILTER, false, + false); + Map> map = null; + try { + map = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + Element value2 = map.entrySet().iterator().next() + .getValue(); + if (!value.equals(value2.getValue())) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncBopDelete(key, bkey, ElementFlagFilter.DO_NOT_FILTER, + true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testKeyAttributes() { + String value = "MyValue"; + long bkey = Long.MAX_VALUE; + + int keySize = 50; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKeyA" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + CollectionAttributes keyAttributes = new CollectionAttributes(); + keyAttributes.setExpireTime(10); + keyAttributes.setMaxCount(200); + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, EFLAG, + value, keyAttributes); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + fail(); + } + + for (String key : keys) { + CollectionAttributes attrs = mc.asyncGetAttr(key).get(500L, + TimeUnit.MILLISECONDS); + assertEquals(keyAttributes.getMaxCount(), attrs.getMaxCount()); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeout() throws Exception { + String value = "MyValue"; + long bkey = Long.MAX_VALUE; + + int keySize = 10000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + String key = "MyBopKey" + i; + keys[i] = key; + } + + try { 
+ // DELETE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, EFLAG, + value, new CollectionAttributes()); + try { + future.get(1L, TimeUnit.MILLISECONDS); + Assert.fail("There is no timeout"); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testErrorCount() { + String value = "MyValue"; + long bkey = Long.MAX_VALUE; + + int keySize = 1200; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + String key = "MyBopKeyErrorCount" + i; + keys[i] = key; + } + + try { + // DELETE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncBopInsertBulk(Arrays.asList(keys), bkey, EFLAG, + value, null); + + Map map = future.get(2000L, + TimeUnit.MILLISECONDS); + + assertEquals(keySize, map.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BopPipeUpdateTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BopPipeUpdateTest.java new file mode 100644 index 000000000..7c4e4d3a3 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BopPipeUpdateTest.java @@ -0,0 +1,279 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.ElementFlagUpdate; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BopPipeUpdateTest extends BaseIntegrationTest { + + private static final String KEY = BopPipeUpdateTest.class.getSimpleName(); + private static final int elementCount = 1200; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + List> elements = new ArrayList>(); + + for (int i = 0; i < elementCount; i++) { + elements.add(new Element(i, "value" + i, new byte[] { 1, 1, + 1, 1 })); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionAttributes attr = new CollectionAttributes(); + attr.setMaxCount(10000L); + + CollectionFuture> future = mc + .asyncBopPipedInsertBulk(KEY, elements, attr); + + Map map = future.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertTrue(map.isEmpty()); + + Map> map3 = mc.asyncBopGet(KEY, 0, 9999, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get(); + + Assert.assertEquals(elementCount, map3.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + @Override + protected void 
tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testBopPipeUpdateValue() { + + List> updateElements = new ArrayList>(); + for (int i = 0; i < elementCount; i++) { + updateElements.add(new Element(i, "updated" + i, + new ElementFlagUpdate(new byte[] { 1, 1, 1, 1 }))); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionFuture> future2 = mc + .asyncBopPipedUpdateBulk(KEY, updateElements); + + Map map2 = future2.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + // System.out.println(map2.size()); + Assert.assertTrue(map2.isEmpty()); + + for (int i = 0; i < elementCount; i++) { + assertEquals( + "updated" + i, + mc.asyncBopGet(KEY, i, ElementFlagFilter.DO_NOT_FILTER, + false, false).get(1000L, TimeUnit.MILLISECONDS) + .get(new Long(i)).getValue()); + } + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + + } + + public void testBopPipeUpdateEFlags() { + + byte[] NEW_BYTE_EFLAG = new byte[] { 1, 1 }; + + List> updateElements = new ArrayList>(); + for (int i = 0; i < elementCount; i++) { + updateElements.add(new Element(i, null, + new ElementFlagUpdate(1, BitWiseOperands.AND, + NEW_BYTE_EFLAG))); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionFuture> future2 = mc + .asyncBopPipedUpdateBulk(KEY, updateElements); + + Map map2 = future2.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + // System.out.println(map2.size()); + Assert.assertTrue(map2.isEmpty()); + + for (int i = 0; i < elementCount; i++) { + Element element = mc + .asyncBopGet(KEY, i, ElementFlagFilter.DO_NOT_FILTER, + false, false).get(1000L, TimeUnit.MILLISECONDS) + .get(new Long(i)); + + // System.out.println(element.getFlagByHex()); + assertEquals("value" + i, element.getValue()); + assertEquals("0x01010101", element.getFlagByHex()); + } + + } catch (Exception e) { 
+ e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBopPipeUpdateEFlagsReset() { + + List> updateElements = new ArrayList>(); + for (int i = 0; i < elementCount; i++) { + updateElements.add(new Element(i, null, + ElementFlagUpdate.RESET_FLAG)); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionFuture> future2 = mc + .asyncBopPipedUpdateBulk(KEY, updateElements); + + Map map2 = future2.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + // System.out.println(map2.size()); + Assert.assertTrue(map2.isEmpty()); + + for (int i = 0; i < elementCount; i++) { + Element element = mc + .asyncBopGet(KEY, i, ElementFlagFilter.DO_NOT_FILTER, + false, false).get(1000L, TimeUnit.MILLISECONDS) + .get(new Long(i)); + + // System.out.println(element.getFlagByHex()); + assertEquals("value" + i, element.getValue()); + assertEquals(null, element.getFlagByHex()); + } + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBopPipeUpdateNotFoundElement() { + + try { + assertTrue(mc.asyncBopDelete(KEY, 0L, 1000L, + ElementFlagFilter.DO_NOT_FILTER, 600, false).get(1000L, + TimeUnit.MILLISECONDS)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + + List> updateElements = new ArrayList>(); + for (int i = 0; i < elementCount; i++) { + updateElements.add(new Element(i, "updated" + i, + new ElementFlagUpdate(new byte[] { 1, 1, 1, 1 }))); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionFuture> future2 = mc + .asyncBopPipedUpdateBulk(KEY, updateElements); + + Map map2 = future2.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + assertEquals(600, map2.size()); + assertEquals(CollectionResponse.NOT_FOUND_ELEMENT, map2.get(0) + .getResponse()); + + for (int i = 600; i < elementCount; i++) { + assertEquals( + "updated" 
+ i, + mc.asyncBopGet(KEY, i, ElementFlagFilter.DO_NOT_FILTER, + false, false).get(1000L, TimeUnit.MILLISECONDS) + .get(new Long(i)).getValue()); + } + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + + } + + public void testBopPipeUpdateNotFoundKey() { + + String key2 = "NEW_BopPipeUpdateTest"; + + List> updateElements = new ArrayList>(); + for (int i = 0; i < elementCount; i++) { + updateElements.add(new Element(i, "updated" + i, + new ElementFlagUpdate(new byte[] { 1, 1, 1, 1 }))); + } + + try { + // long start = System.currentTimeMillis(); + + CollectionFuture> future2 = mc + .asyncBopPipedUpdateBulk(key2, updateElements); + + Map map2 = future2.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + assertEquals(elementCount, map2.size()); + assertEquals(CollectionResponse.NOT_FOUND, map2.get(0) + .getResponse()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BulkSetTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BulkSetTest.java new file mode 100644 index 000000000..4e6eaaa2b --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BulkSetTest.java @@ -0,0 +1,278 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.bulkoperation; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BulkSetTest extends BaseIntegrationTest { + + public void testInsertAndGet2() { + int TEST_COUNT = 3; + + try { + for (int keySize = 0; keySize < TEST_COUNT; keySize++) { + + // generate key + Map o = new HashMap(); + + for (int i = 0; i < 600; i++) { + o.put("MyKey" + i, "MyValue" + i); + } + + List keys = new ArrayList(o.keySet()); + + // REMOVE + for (String key : keys) { + mc.delete(key).get(); + } + + // SET + Future> future = mc + .asyncSetBulk(o, 60); + + Map errorList; + try { + errorList = future.get(20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + String k, v; + for (int i = 0; i < keys.size(); i++) { + k = keys.get(i); + v = (String) mc.asyncGet(k).get(); + + if (!v.equals(o.get(k))) { + errorCount++; + } + + mc.delete(k).get(); + } + + Assert.assertEquals("Error count is greater than 0.", 0, + errorCount); + + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testInsertAndGet() { + String value = "MyValue"; + + int TEST_COUNT = 64; + + try { + // SET null key + try { + mc.asyncSetBulk(null, 60, value); + } catch (NullPointerException e) { + + } catch (Exception e) { + Assert.fail(); + } + + for (int keySize = 0; keySize < TEST_COUNT; keySize++) { + // generate key + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyKey" + i; + } + + // REMOVE 
+ for (String key : keys) { + mc.delete(key); + } + + // SET + Future> future = mc + .asyncSetBulk(Arrays.asList(keys), 60, value); + + Map errorList; + try { + errorList = future.get(20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + String v = (String) mc.get(key); + if (!value.equals(v)) { + errorCount++; + } + } + + Assert.assertEquals("Error count is greater than 0.", 0, + errorCount); + + // REMOVE + for (String key : keys) { + mc.delete(key); + } + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeout() { + int keySize = 100000; + + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyKey" + i; + } + + String value = "MyValue"; + + try { + Future> future = mc + .asyncSetBulk(Arrays.asList(keys), 60, value); + + try { + future.get(1000L, TimeUnit.MILLISECONDS); + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertAndGetUsingSingleClient() { + String value = "MyValue"; + + int TEST_COUNT = 64; + + try { + // SET null key + try { + mc.asyncSetBulk(null, 60, value); + } catch (NullPointerException e) { + + } catch (Exception e) { + Assert.fail(); + } + + for (int keySize = 0; keySize < TEST_COUNT; keySize++) { + // generate key + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyKey" + i; + } + + // REMOVE + for (String key : keys) { + mc.delete(key); + } + + // SET + Future> future = mc + .asyncSetBulk(Arrays.asList(keys), 60, value); + + Map errorList; + try { + errorList = future.get(20000L, TimeUnit.MILLISECONDS); + 
Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + String v = (String) mc.get(key); + if (!value.equals(v)) { + errorCount++; + } + } + + Assert.assertEquals("Error count is greater than 0.", 0, + errorCount); + + // REMOVE + for (String key : keys) { + mc.delete(key); + } + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeoutUsingSingleClient() { + int keySize = 100000; + + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyKey" + i; + } + + String value = "MyValue"; + + try { + Future> future = mc + .asyncSetBulk(Arrays.asList(keys), 60, value); + + try { + future.get(1000L, TimeUnit.MILLISECONDS); + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/BulkSetVariousTypeTest.java b/src/test/manual/net/spy/memcached/bulkoperation/BulkSetVariousTypeTest.java new file mode 100644 index 000000000..10f6d7d1c --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/BulkSetVariousTypeTest.java @@ -0,0 +1,85 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class BulkSetVariousTypeTest extends BaseIntegrationTest { + + private static class MyBean implements Serializable { + private static final long serialVersionUID = -5977830942924286134L; + + private String name; + + public MyBean(String name) { + this.name = name; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof MyBean) { + return this.name.equals(((MyBean) obj).name); + } + return false; + } + } + + public void testInsertAndGet() { + Object[] valueList = { 1.0, 1000, 1000L, "String", + new MyBean("beanName") }; + String keyPrefix = "TypeTestKey"; + + try { + for (int i = 0; i < valueList.length; i++) { + String[] key = new String[] { keyPrefix + i }; + // REMOVE + mc.delete(key[0]); + + // SET + Future> future = mc + .asyncSetBulk(Arrays.asList(key), 60, valueList[i]); + + Map errorList; + try { + errorList = future.get(20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + Assert.fail(e.toString()); + } + + // GET + Object v = mc.get(key[0]); + Assert.assertEquals(String.format("K=%s, V=%s", key, v), + 
valueList[i], v); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkMultipleValueTest.java b/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkMultipleValueTest.java new file mode 100644 index 000000000..2f0f7c411 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkMultipleValueTest.java @@ -0,0 +1,239 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class LopInsertBulkMultipleValueTest extends BaseIntegrationTest { + + private String key = "LopInsertBulkMultipleValueTest"; + + @Override + protected void tearDown() { + try { + mc.delete(key).get(); + super.tearDown(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testInsertAndGet() { + String value = "MyValue"; + + int valueCount = 500; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue"; + } + + try { + // REMOVE + mc.asyncLopDelete(key, 0, 4000, true).get(); + + // SET + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, Arrays.asList(valueList), + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue(errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + List list = null; + Future> f = mc.asyncLopGet(key, 0, valueCount, false, + false); + try { + list = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + + Assert.assertNotNull("List is null.", list); + Assert.assertTrue("Cached list is empty.", !list.isEmpty()); + Assert.assertEquals(valueCount, list.size()); + + for (Object o : list) { + if (!value.equals(o)) { + errorCount++; + } + } + Assert.assertEquals(valueCount, list.size()); + Assert.assertEquals(0, errorCount); + + // REMOVE + mc.asyncLopDelete(key, 0, 4000, true).get(); + } catch (Exception e) { + e.printStackTrace(); 
+ Assert.fail(e.getMessage()); + } + } + + public void testTimeout() { + int valueCount = 500; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue"; + } + + try { + // SET + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, Arrays.asList(valueList), + new CollectionAttributes()); + try { + future.get(1L, TimeUnit.NANOSECONDS); + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(e.getMessage()); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertAndGetUsingSingleClient() { + String value = "MyValue"; + + int valueCount = 500; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue"; + } + + try { + // REMOVE + mc.delete(key).get(); + + // SET + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, Arrays.asList(valueList), + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue(errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + List list = null; + Future> f = mc.asyncLopGet(key, 0, valueCount, false, + false); + try { + list = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + + Assert.assertNotNull("List is null.", list); + Assert.assertTrue("Cached list is empty.", !list.isEmpty()); + Assert.assertEquals(valueCount, list.size()); + + for (Object o : list) { + if (!value.equals(o)) { + errorCount++; + } + } + Assert.assertEquals(valueCount, list.size()); + Assert.assertEquals(0, errorCount); + + // REMOVE + mc.asyncLopDelete(key, 0, 4000, true).get(); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void 
testTimeoutUsingSingleClient() { + int valueCount = 500; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue"; + } + + try { + // SET + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, Arrays.asList(valueList), + new CollectionAttributes()); + try { + future.get(1L, TimeUnit.NANOSECONDS); + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testErrorCount() { + int valueCount = 1200; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue"; + } + + try { + // SET + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, Arrays.asList(valueList), + null); + + Map map = future.get(1000L, + TimeUnit.MILLISECONDS); + assertEquals(valueCount, map.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkTest.java b/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkTest.java new file mode 100644 index 000000000..43d1a3236 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/LopInsertBulkTest.java @@ -0,0 +1,143 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class LopInsertBulkTest extends BaseIntegrationTest { + + public void testInsertAndGet() { + String value = "MyValue"; + int keySize = 500; + + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyLopKey" + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.asyncLopDelete(key, 0, 4000, true).get(); + } + + // SET + Future> future = mc + .asyncLopInsertBulk(Arrays.asList(keys), 0, value, + new CollectionAttributes()); + try { + Map errorList = future.get( + 100L, TimeUnit.MILLISECONDS); + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + } + + // GET + int errorCount = 0; + for (String key : keys) { + Future> f = mc.asyncLopGet(key, 0, false, false); + List cachedList = null; + try { + cachedList = f.get(); + } catch (Exception e) { + f.cancel(true); + } + Object value2 = cachedList.get(0); + if (!value.equals(value2)) { + errorCount++; + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncLopDelete(key, 0, 4000, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeout() { + String value = "MyValue"; + int keySize = 250000; + + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyLopKey" + i; + } + + 
try { + // SET + Future> future = mc + .asyncLopInsertBulk(Arrays.asList(keys), 0, value, + new CollectionAttributes()); + try { + future.get(1L, TimeUnit.MILLISECONDS); + Assert.fail("There is no timeout"); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testCountError() { + String value = "MyValue"; + + int keySize = 1200; + + String[] keys = new String[keySize]; + + try { + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyLopKey" + i; + mc.delete(keys[i]).get(); + } + + // SET + Future> future = mc + .asyncLopInsertBulk(Arrays.asList(keys), 0, value, null); + + Map map = future.get(1000L, + TimeUnit.MILLISECONDS); + assertEquals(keySize, map.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/PipeInsertTest.java b/src/test/manual/net/spy/memcached/bulkoperation/PipeInsertTest.java new file mode 100644 index 000000000..b751e9b43 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/PipeInsertTest.java @@ -0,0 +1,153 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.bulkoperation; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class PipeInsertTest extends BaseIntegrationTest { + + private static final String KEY = PipeInsertTest.class.getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testBopPipeInsert() { + int elementCount = 5000; + + List> elements = new ArrayList>(); + + for (int i = 0; i < elementCount; i++) { + elements.add(new Element(i, "value" + i, + new byte[] { (byte) 1 })); + } + + try { + long start = System.currentTimeMillis(); + + CollectionAttributes attr = new CollectionAttributes(); + + CollectionFuture> future = mc + .asyncBopPipedInsertBulk(KEY, elements, attr); + + Map map = future.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertTrue(map.isEmpty()); + + Map> map3 = mc.asyncBopGet(KEY, 0, 9999, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get(); + + Assert.assertEquals(4000, map3.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBopPipeInsert2() { + int elementCount = 5000; + Map elements = new TreeMap(); + for (long i = 0; i < elementCount; i++) { + elements.put(i, "value" + i); + } + + try { + long start = 
System.currentTimeMillis(); + + CollectionAttributes attr = new CollectionAttributes(); + + CollectionFuture> future = mc + .asyncBopPipedInsertBulk(KEY, elements, attr); + + Map map = future.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertTrue(map.isEmpty()); + + Map> map3 = mc.asyncBopGet(KEY, 0, 9999, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get(); + + Assert.assertEquals(4000, map3.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testLopPipeInsert() { + int elementCount = 5000; + + List elements = new ArrayList(elementCount); + + for (int i = 0; i < elementCount; i++) { + elements.add("value" + i); + } + + try { + long start = System.currentTimeMillis(); + + CollectionAttributes attr = new CollectionAttributes(); + + CollectionFuture> future = mc + .asyncLopPipedInsertBulk(KEY, -1, elements, attr); + + Map map = future.get(5000L, + TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - start + "ms"); + + Assert.assertTrue(map.isEmpty()); + + List list = mc.asyncLopGet(KEY, 0, 9999, false, false) + .get(); + + Assert.assertEquals(4000, list.size()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/bulkoperation/SetBulkTimeoutTest.java b/src/test/manual/net/spy/memcached/bulkoperation/SetBulkTimeoutTest.java new file mode 100644 index 000000000..a4ebb52c5 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/SetBulkTimeoutTest.java @@ -0,0 +1,63 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.junit.Ignore; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.ops.CollectionOperationStatus; + +@Ignore +public class SetBulkTimeoutTest extends BaseIntegrationTest { + + public void testTimeoutUsingSingleClient() { + String value = "MyValue"; + + int keySize = 250000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = "MyBopKey" + i; + } + + try { + // SET + Future> future = mc + .asyncSetBulk(Arrays.asList(keys), 60, value); + try { + future.get(10000L, TimeUnit.MILLISECONDS); + Assert.fail("Timeout is not simulated."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + Assert.fail(); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkMultipleValueTest.java b/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkMultipleValueTest.java new file mode 100644 index 000000000..153244095 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkMultipleValueTest.java @@ -0,0 +1,241 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class SopInsertBulkMultipleValueTest extends BaseIntegrationTest { + + public void testInsertAndGet() { + String key = "testInsertAndGet"; + String prefix = "MyValue"; + + int valueCount = mc.getMaxPipedItemCount(); + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = String.format("%s%d", prefix, i); + } + + try { + // REMOVE + for (Object v : valueList) { + mc.asyncSopDelete(key, v, true).get(); + } + + // SET + Future> future = mc + .asyncSopPipedInsertBulk(key, Arrays.asList(valueList), + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue(errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + Future> f = mc.asyncSopGet(key, valueCount, false, + false); + Set list = null; + try { + list = f.get(10000L, TimeUnit.MILLISECONDS); + } catch 
(Exception e) { + f.cancel(true); + e.printStackTrace(); + } + + Assert.assertNotNull("Cached list is null.", list); + Assert.assertTrue("Cached list is empty.", !list.isEmpty()); + + for (Object o : list) { + if (!((String) o).startsWith(prefix)) { + errorCount++; + } + } + + Assert.assertEquals(valueCount, list.size()); + Assert.assertEquals(0, errorCount); + + // REMOVE + for (Object v : valueList) { + mc.asyncSopDelete(key, v, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeout() { + String key = "testTimeout"; + int valueCount = mc.getMaxPipedItemCount(); + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue" + i; + } + + try { + // SET + Future> future = mc + .asyncSopPipedInsertBulk(key, Arrays.asList(valueList), + new CollectionAttributes()); + try { + future.get(1L, TimeUnit.NANOSECONDS); + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(e.getMessage()); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testInsertAndGetSingleClient() { + String key = "testInsertAndGetSingleClient"; + String prefix = "MyValue"; + int valueCount = mc.getMaxPipedItemCount(); + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = String.format("%s%d", prefix, i); + } + + try { + // REMOVE + for (Object v : valueList) { + mc.asyncSopDelete(key, v, true).get(); + } + + // SET + Future> future = mc + .asyncSopPipedInsertBulk(key, Arrays.asList(valueList), + new CollectionAttributes()); + try { + Map errorList = future.get( + 20000L, TimeUnit.MILLISECONDS); + Assert.assertTrue(errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + Future> f = mc.asyncSopGet(key, 
valueCount, false, + false); + Set list = null; + try { + list = f.get(10000L, TimeUnit.MILLISECONDS); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + + Assert.assertNotNull("Cached list is null.", list); + Assert.assertTrue("Cached list is empty.", !list.isEmpty()); + + for (Object o : list) { + if (!((String) o).startsWith(prefix)) { + errorCount++; + } + } + + Assert.assertEquals(valueCount, list.size()); + Assert.assertEquals(0, errorCount); + + // REMOVE + for (Object v : valueList) { + mc.asyncSopDelete(key, v, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeoutUsingSingleClient() { + String key = "testTimeoutUsingSingleClient"; + int valueCount = mc.getMaxPipedItemCount(); + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue" + i; + } + + try { + // SET + Future> future = mc + .asyncSopPipedInsertBulk(key, Arrays.asList(valueList), + new CollectionAttributes()); + try { + Map errorList = future.get( + 1L, TimeUnit.NANOSECONDS); + Assert.assertTrue(errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + return; + } catch (Exception e) { + future.cancel(true); + Assert.fail(); + } + Assert.fail(); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testErrorCount() { + String key = "testErrorCount"; + int valueCount = 1200; + Object[] valueList = new Object[valueCount]; + for (int i = 0; i < valueList.length; i++) { + valueList[i] = "MyValue" + i; + } + + try { + mc.delete(key).get(); + + // SET + Future> future = mc + .asyncSopPipedInsertBulk(key, Arrays.asList(valueList), + null); + + Map map = future.get(2000L, + TimeUnit.MILLISECONDS); + assertEquals(valueCount, map.size()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } +} \ No newline at end of file diff --git 
a/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkTest.java b/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkTest.java new file mode 100644 index 000000000..fc8280d40 --- /dev/null +++ b/src/test/manual/net/spy/memcached/bulkoperation/SopInsertBulkTest.java @@ -0,0 +1,153 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.bulkoperation; + +import java.util.Arrays; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class SopInsertBulkTest extends BaseIntegrationTest { + + private String KEY = SopInsertBulkTest.class.getSimpleName(); + + public void testInsertAndGet() { + String value = "MyValue"; + int keySize = 500; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = KEY + i; + } + + try { + // REMOVE + for (String key : keys) { + mc.asyncSopDelete(key, value, true).get(); + } + + // SET + Future> future = mc + .asyncSopInsertBulk(Arrays.asList(keys), value, + new CollectionAttributes()); + try { + Map errorList = future.get( + 100L, 
TimeUnit.MILLISECONDS); + Assert.assertTrue("Error list is not empty.", + errorList.isEmpty()); + } catch (TimeoutException e) { + future.cancel(true); + e.printStackTrace(); + } + + // GET + int errorCount = 0; + for (String key : keys) { + Future> f = mc.asyncSopGet(key, 1, false, false); + Set cachedList = null; + try { + cachedList = f.get(); + } catch (Exception e) { + f.cancel(true); + e.printStackTrace(); + } + + Assert.assertTrue("Cached list is empty.", + !cachedList.isEmpty()); + + for (Object o : cachedList) { + if (!value.equals(o)) { + errorCount++; + } + } + } + Assert.assertEquals("Error count is greater than 0.", 0, errorCount); + + // REMOVE + for (String key : keys) { + mc.asyncSopDelete(key, value, true).get(); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(); + } + } + + public void testTimeout() { + String value = "MyValue"; + int keySize = 100000; + String[] keys = new String[keySize]; + for (int i = 0; i < keys.length; i++) { + keys[i] = KEY + i; + } + + try { + long s = 0; + // SET + s = System.currentTimeMillis(); + Future> future = mc + .asyncSopInsertBulk(Arrays.asList(keys), value, + new CollectionAttributes()); + + try { + future.get(1L, TimeUnit.MILLISECONDS); + + // System.out.println(System.currentTimeMillis() - s); + + Assert.fail("There is no timeout."); + } catch (TimeoutException e) { + future.cancel(true); + return; + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail("ERROR"); + } + } + + public void testErrorCount() { + String value = "MyValue"; + int keySize = 1200; + + String[] keys = new String[keySize]; + + try { + for (int i = 0; i < keys.length; i++) { + keys[i] = KEY + i; + mc.delete(keys[i]).get(); + } + + // SET + Future> future = mc + .asyncSopInsertBulk(Arrays.asList(keys), value, null); + + Map map = future.get(1000L, + TimeUnit.MILLISECONDS); + assertEquals(keySize, map.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail("ERROR"); + } + } +} \ No newline at 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection;

import java.net.SocketAddress;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import junit.framework.TestCase;
import net.spy.memcached.AddrUtil;
import net.spy.memcached.ArcusClient;
import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.ConnectionObserver;

import org.junit.Ignore;

/**
 * Base class for manual integration tests. Creates an ArcusClient either
 * through ZooKeeper (USE_ZK=true) or by connecting directly to ARCUS_HOST,
 * and offers helpers to populate/clear list, set, and b+tree collections.
 *
 * Configuration comes from system properties: ZK_HOST, ZK_SERVICE_ID,
 * ARCUS_HOST, USE_ZK.
 */
@Ignore
public class BaseIntegrationTest extends TestCase {

    protected static String ZK_HOST = System.getProperty("ZK_HOST",
            "127.0.0.1:2181");

    protected static String ZK_SERVICE_ID = System.getProperty("ZK_SERVICE_ID",
            "test");

    protected static String ARCUS_HOST = System.getProperty("ARCUS_HOST",
            "127.0.0.1:11211");

    protected static boolean USE_ZK = Boolean.valueOf(System.getProperty(
            "USE_ZK", "false"));

    // ZK-based clients hold cluster watches, so shut down between tests.
    protected static boolean SHUTDOWN_AFTER_EACH_TEST = USE_ZK;

    protected ArcusClient mc = null;

    static {
        System.out.println("---------------------------------------------");
        System.out.println("[ArcusClient initialization info.]");
        System.out.println("\tUSE_ZK = " + USE_ZK);
        // BUGFIX: previously printed USE_ZK under this label.
        System.out.println("\tSHUTDOWN_AFTER_EACH_TEST = "
                + SHUTDOWN_AFTER_EACH_TEST);

        if (USE_ZK) {
            System.out.println("\tZK_HOST = " + ZK_HOST);
            System.out.println("\tZK_SERVICE_ID = " + ZK_SERVICE_ID);
        } else {
            System.out.println("\tARCUS_HOST = " + ARCUS_HOST);
        }

        System.out.println("---------------------------------------------");
    }

    @Override
    protected void setUp() throws Exception {
        try {
            System.setProperty("net.spy.log.LoggerImpl",
                    "net.spy.memcached.compat.log.Log4JLogger");
            System.setProperty("arcus.mbean", "true");
            if (USE_ZK) {
                openFromZK();
            } else {
                openDirect();
            }
        } catch (Exception e) {
            // Best-effort: a failed connection surfaces later as NPE on mc.
            e.printStackTrace();
        }
    }

    @Override
    protected void tearDown() throws Exception {
        try {
            if (SHUTDOWN_AFTER_EACH_TEST) {
                shutdown();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Creates a client via the ZooKeeper-based cache manager. */
    protected void openFromZK() {
        ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();
        mc = ArcusClient.createArcusClient(ZK_HOST, ZK_SERVICE_ID, cfb);
    }

    /**
     * Connects directly to ARCUS_HOST and blocks until every node in the
     * comma-separated host list has established a connection.
     */
    protected void openDirect() throws Exception {
        ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder();

        final CountDownLatch latch = new CountDownLatch(
                ARCUS_HOST.split(",").length);
        final ConnectionObserver obs = new ConnectionObserver() {

            @Override
            public void connectionEstablished(SocketAddress sa,
                    int reconnectCount) {
                latch.countDown();
            }

            @Override
            public void connectionLost(SocketAddress sa) {
                assert false : "Connection is failed.";
            }

        };
        cfb.setInitialObservers(Collections.singleton(obs));

        mc = new ArcusClient(cfb.build(), AddrUtil.getAddresses(ARCUS_HOST));
        latch.await();
    }

    protected void shutdown() throws Exception {
        mc.shutdown();
    }

    /** Appends each item to the tail (index -1) of the list at key. */
    protected void addToList(String key, Object[] items) throws Exception {
        for (Object each : items) {
            assertTrue(mc.asyncLopInsert(key, -1, each,
                    new CollectionAttributes())
                    .get(1000, TimeUnit.MILLISECONDS));
        }
    }

    /** Inserts each item into the set at key. */
    protected void addToSet(String key, Object[] items) throws Exception {
        for (Object each : items) {
            assertTrue(mc.asyncSopInsert(key, each, new CollectionAttributes())
                    .get(1000, TimeUnit.MILLISECONDS));
        }
    }

    /** Inserts items into the b+tree at key using their index as bkey. */
    protected void addToBTree(String key, Object[] items) throws Exception {
        for (int i = 0; i < items.length; i++) {
            assertTrue(mc.asyncBopInsert(key, i, null, items[i],
                    new CollectionAttributes())
                    .get(1000, TimeUnit.MILLISECONDS));
        }
    }

    /** Drops the first `size` elements of the list (and the key itself). */
    protected void deleteList(String key, int size) throws Exception {
        mc.asyncLopDelete(key, 0, size, true).get(1000, TimeUnit.MILLISECONDS);
    }

    /** Removes each given item from the set (dropping the empty key). */
    protected void deleteSet(String key, Object[] items) throws Exception {
        for (Object d : items) {
            mc.asyncSopDelete(key, d, true).get(1000, TimeUnit.MILLISECONDS);
        }
    }

    /** Removes bkeys 0..values.length-1 from the b+tree at key. */
    protected void deleteBTree(String key, Object[] values) throws Exception {
        for (int i = 0; i < values.length; i++) {
            mc.asyncBopDelete(key, i, ElementFlagFilter.DO_NOT_FILTER, true)
                    .get(1000, TimeUnit.MILLISECONDS);
        }
    }
}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import net.spy.memcached.internal.CollectionFuture;
import net.spy.memcached.ops.OperationStatus;

/**
 * Verifies that CollectionFuture exposes the operation status after
 * success, failure, and timeout.
 */
public class CollectionFutureTest extends BaseIntegrationTest {

    private String key = "CollectionFutureTest";

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        // BUGFIX: await the cleanup delete; previously the future was
        // dropped, racing the delete against the test body.
        mc.asyncBopDelete(key, 0, 100, ElementFlagFilter.DO_NOT_FILTER, 0,
                true).get(1000, TimeUnit.MILLISECONDS);
    }

    /** A successful insert must report a CREATED_STORED status. */
    public void testAfterSuccess() throws Exception {
        CollectionFuture<Boolean> future;
        OperationStatus status;

        future = (CollectionFuture<Boolean>) mc.asyncBopInsert(key, 0, null,
                "hello", new CollectionAttributes());

        // After operation completion (SUCCESS)
        Boolean success = future.get(1000, TimeUnit.MILLISECONDS);
        status = future.getOperationStatus();

        assertTrue(success);
        assertNotNull(status);
        assertTrue(status.isSuccess());
        assertEquals("CREATED_STORED", status.getMessage());
    }

    /** Fetching a missing key must report a NOT_FOUND status. */
    public void testAfterFailure() throws Exception {
        CollectionFuture<Map<Long, Element<Object>>> future;
        OperationStatus status;

        future = (CollectionFuture<Map<Long, Element<Object>>>) mc.asyncBopGet(
                key, 0, ElementFlagFilter.DO_NOT_FILTER, false, false);

        // After operation completion (FAILURE)
        Map<Long, Element<Object>> result = future.get(1000,
                TimeUnit.MILLISECONDS);
        status = future.getOperationStatus();

        assertNull(result);
        assertNotNull(status);
        assertFalse(status.isSuccess());
        assertEquals("NOT_FOUND", status.getMessage());
    }

    /** A timed-out future must still allow querying its status. */
    public void testTimeout() throws Exception {
        CollectionFuture<Boolean> future;
        OperationStatus status;

        future = (CollectionFuture<Boolean>) mc.asyncBopInsert(key, 0, null,
                "hello", new CollectionAttributes());

        try {
            // 1 ns is guaranteed to expire before the server responds.
            future.get(1, TimeUnit.NANOSECONDS);
        } catch (Exception e) {
            future.cancel(true);
        }

        status = future.getOperationStatus();
    }

}
+ */ +package net.spy.memcached.collection; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.transcoders.CollectionTranscoder; + +public class CollectionMaxElementSize extends BaseIntegrationTest { + + private String key = "CollectionMaxElementSize"; + + public void testExceed() throws Exception { + CollectionFuture future; + future = mc.asyncLopInsert(key, -1, "test", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < CollectionTranscoder.MAX_SIZE + 1; i++) { + sb.append(i % 9); + } + + String tooLargeValue = sb.toString(); + assertEquals(CollectionTranscoder.MAX_SIZE + 1, tooLargeValue.length()); + + try { + future = mc.asyncLopInsert(key, -1, tooLargeValue, + new CollectionAttributes()); + fail(); + } catch (IllegalArgumentException e) { + e.printStackTrace(); + assertTrue(e.getMessage().contains("Cannot cache data larger than")); + } + } + +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/collection/ElementFlagFilterTest.java b/src/test/manual/net/spy/memcached/collection/ElementFlagFilterTest.java new file mode 100644 index 000000000..604fc646e --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/ElementFlagFilterTest.java @@ -0,0 +1,98 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection; + +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; +import junit.framework.Assert; +import junit.framework.TestCase; + +public class ElementFlagFilterTest extends TestCase { + + public void testValidate1() { + try { + new ElementFlagFilter(null, null); + } catch (NullPointerException e) { + return; + } + Assert.fail("Argument validation failed."); + } + + public void testValidate2() { + try { + new ElementFlagFilter(null, "".getBytes()); + } catch (NullPointerException e) { + return; + } + Assert.fail("Argument validation failed."); + } + + public void testValidate3() { + try { + new ElementFlagFilter(CompOperands.Equal, null); + } catch (NullPointerException e) { + return; + } + Assert.fail("Argument validation failed."); + } + + public void testZeroLengthCompValue() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "".getBytes()); + filter.toString(); + } catch (IllegalArgumentException e) { + return; + } catch (Exception e) { + fail(e.toString()); + } + fail("Oops. Something's going wrong."); + } + + public void testZeroLengthBitCompValue() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "A".getBytes()); + + filter.setBitOperand(BitWiseOperands.AND, "".getBytes()); + filter.toString(); + } catch (IllegalArgumentException e) { + return; + } catch (Exception e) { + fail(e.toString()); + } + fail("Oops. 
Something's going wrong."); + } + + public void testConstruct() { + String src = "ABC"; + + ElementFlagFilter filter = new ElementFlagFilter(CompOperands.Equal, + src.getBytes()); + + Assert.assertEquals("0 EQ 0x414243", filter.toString()); + + filter.setCompareOffset(2); + + Assert.assertEquals("2 EQ 0x414243", filter.toString()); + + filter.setBitOperand(BitWiseOperands.AND, src.getBytes()); + + Assert.assertEquals("2 & 0x414243 EQ 0x414243", filter.toString()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/attribute/BTreeGetAttrTest.java b/src/test/manual/net/spy/memcached/collection/attribute/BTreeGetAttrTest.java new file mode 100644 index 000000000..f7657c2cf --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/attribute/BTreeGetAttrTest.java @@ -0,0 +1,188 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.attribute; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.util.BTreeUtil; + +public class BTreeGetAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final String VALUE = "VALUE"; + private final byte[] EFLAG = "eflag".getBytes(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testGetTrimmedTest() { + try { + // create with default. + CollectionAttributes attribute = new CollectionAttributes(); + attribute.setMaxCount(3L); + Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING, + attribute).get()); + + Assert.assertTrue(mc.asyncBopInsert(KEY, 0L, EFLAG, VALUE, + attribute).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, 1L, EFLAG, VALUE, + attribute).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, 2L, EFLAG, VALUE, + attribute).get()); + + // get trimmed + CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get(); + if (btreeAttrs.getTrimmed() != null) { // not support + Assert.assertEquals(new Long(0L), btreeAttrs.getTrimmed()); + } + + Assert.assertTrue(mc.asyncBopInsert(KEY, 3L, EFLAG, VALUE, + attribute).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, 4L, EFLAG, VALUE, + attribute).get()); + + // get trimmed + btreeAttrs = mc.asyncGetAttr(KEY).get(); + if (btreeAttrs.getTrimmed() != null) { // not support + Assert.assertEquals(new Long(1L), btreeAttrs.getTrimmed()); + } + + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void 
testGetMinMaxBkeyTest() { + try { + Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING, + new CollectionAttributes()).get()); + + CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(btreeAttrs.getMinBkey()); + Assert.assertNull(btreeAttrs.getMaxBkey()); + + Assert.assertTrue(mc.asyncBopInsert(KEY, 0L, EFLAG, VALUE, null) + .get()); + + btreeAttrs = mc.asyncGetAttr(KEY).get(); + if (btreeAttrs.getMinBkey() != null) { // not support + Assert.assertEquals(new Long(0L), btreeAttrs.getMinBkey()); + Assert.assertEquals(new Long(0L), btreeAttrs.getMaxBkey()); + } + + Assert.assertTrue(mc.asyncBopInsert(KEY, 1L, EFLAG, VALUE, null) + .get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, 2L, EFLAG, VALUE, null) + .get()); + + btreeAttrs = mc.asyncGetAttr(KEY).get(); + if (btreeAttrs.getMinBkey() != null) { // not support + Assert.assertEquals(new Long(0L), btreeAttrs.getMinBkey()); + Assert.assertEquals(new Long(2L), btreeAttrs.getMaxBkey()); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testGetMinMaxBkeyByBytesTest() { + try { + Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING, + new CollectionAttributes()).get()); + + CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(btreeAttrs.getMinBkeyByBytes()); + Assert.assertNull(btreeAttrs.getMaxBkeyByBytes()); + + Assert.assertTrue(mc.asyncBopInsert(KEY, new byte[] { 0, 0, 1 }, + EFLAG, VALUE, null).get()); + btreeAttrs = mc.asyncGetAttr(KEY).get(); + + if (btreeAttrs.getMinBkeyByBytes() != null) { // not support + Assert.assertEquals("0x000001", + BTreeUtil.toHex(btreeAttrs.getMinBkeyByBytes())); + Assert.assertEquals("0x000001", + BTreeUtil.toHex(btreeAttrs.getMaxBkeyByBytes())); + } + + Assert.assertTrue(mc.asyncBopInsert(KEY, new byte[] { 1, 0, 1 }, + EFLAG, VALUE, null).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, new byte[] { 2, 0, 1 }, + EFLAG, VALUE, null).get()); + btreeAttrs = 
mc.asyncGetAttr(KEY).get(); + if (btreeAttrs.getMinBkeyByBytes() != null) { // not support + Assert.assertEquals("0x000001", + BTreeUtil.toHex(btreeAttrs.getMinBkeyByBytes())); + Assert.assertEquals("0x020001", + BTreeUtil.toHex(btreeAttrs.getMaxBkeyByBytes())); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testGetOverflowAttrTest() { + try { + Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING, + new CollectionAttributes()).get()); + + CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get(); + CollectionOverflowAction overflowAction = btreeAttrs + .getOverflowAction(); + Assert.assertEquals(CollectionOverflowAction.smallest_trim, + overflowAction); + + // test setattr/getattr smallest_silent_trim + CollectionAttributes btreeAttrs2 = new CollectionAttributes(); + btreeAttrs2 + .setOverflowAction(CollectionOverflowAction.smallest_silent_trim); + Assert.assertTrue(mc.asyncSetAttr(KEY, btreeAttrs2).get()); + + btreeAttrs = mc.asyncGetAttr(KEY).get(); + overflowAction = btreeAttrs.getOverflowAction(); + Assert.assertEquals(CollectionOverflowAction.smallest_silent_trim, + overflowAction); + + // test setattr/getattr largest_silent_trim + btreeAttrs2 = new CollectionAttributes(); + btreeAttrs2 + .setOverflowAction(CollectionOverflowAction.largest_silent_trim); + Assert.assertTrue(mc.asyncSetAttr(KEY, btreeAttrs2).get()); + + btreeAttrs = mc.asyncGetAttr(KEY).get(); + overflowAction = btreeAttrs.getOverflowAction(); + Assert.assertEquals(CollectionOverflowAction.largest_silent_trim, + overflowAction); + + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/collection/attribute/GetAttrTest.java b/src/test/manual/net/spy/memcached/collection/attribute/GetAttrTest.java new file mode 100644 index 000000000..6839948a2 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/attribute/GetAttrTest.java @@ -0,0 +1,97 @@ +/* + * arcus-java-client : Arcus 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.concurrent.TimeUnit;

import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.CollectionType;
import net.spy.memcached.internal.CollectionFuture;

/**
 * getattr behavior across item kinds: plain key-value items, list
 * collections with modified/default attributes, and missing keys.
 */
public class GetAttrTest extends BaseIntegrationTest {

    private final String[] list = { "hello1", "hello2", "hello3" };

    /** A plain KV item reports type kv and flags 0. */
    public void testGetAttr_KV() throws Exception {
        String key = "testGetAttr_KV";

        mc.set(key, 100, "v").get();

        CollectionAttributes attrs = mc.asyncGetAttr(key).get(1000,
                TimeUnit.MILLISECONDS);

        assertNotNull(attrs);
        assertEquals(new Integer(0), attrs.getFlags());
        assertEquals(CollectionType.kv, attrs.getType());
    }

    /** Attributes written via setattr are visible through getattr. */
    public void testGetAttr_ModifiedAttribute() throws Exception {
        String key = "getattr_modified_attribute";

        addToList(key, list);

        // Set attributes
        CollectionAttributes written = new CollectionAttributes();
        written.setMaxCount(1000);
        written.setExpireTime(10000);
        assertTrue(mc.asyncSetAttr(key, written).get(1000,
                TimeUnit.MILLISECONDS));

        // Get attributes
        CollectionAttributes read = mc.asyncGetAttr(key).get(1000,
                TimeUnit.MILLISECONDS);

        assertNotNull(read);
        assertEquals(written.getMaxCount(), read.getMaxCount());
        assertEquals(CollectionType.list, read.getType());

        deleteList(key, list.length);
    }

    /** A freshly created list carries the documented default attributes. */
    public void testGetAttr_DefaultAttribute() throws Exception {
        String key = "getattr_default_attribute";

        addToList(key, list);

        CollectionAttributes read = mc.asyncGetAttr(key).get(1000,
                TimeUnit.MILLISECONDS);

        assertEquals(CollectionAttributes.DEFAULT_FLAGS, read.getFlags());
        assertEquals(CollectionAttributes.DEFAULT_EXPIRETIME,
                read.getExpireTime());
        assertEquals(CollectionAttributes.DEFAULT_MAXCOUNT,
                read.getMaxCount());
        assertEquals(CollectionAttributes.DEFAULT_OVERFLOWACTION,
                read.getOverflowAction());

        deleteList(key, list.length);
    }

    /** getattr on a missing key yields null and a NOT_FOUND status. */
    public void testGetAttr_KeyNotFound() throws Exception {
        CollectionFuture<CollectionAttributes> future = mc
                .asyncGetAttr("NOT_EXISTS");

        CollectionAttributes read = future.get(1000, TimeUnit.MILLISECONDS);

        assertNull(read);
        assertEquals(CollectionResponse.NOT_FOUND, future.getOperationStatus()
                .getResponse());
    }
}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.Arrays;
import java.util.Map;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementValueType;

/**
 * Tests the maxbkeyrange b+tree attribute: once set, elements whose bkey
 * falls out of [maxbkey - range, maxbkey] are trimmed on insert.
 */
public class MaxBkeyRangeTest extends BaseIntegrationTest {

    private final String KEY = this.getClass().getSimpleName();
    private final long BKEY = 1;
    private final byte[] BYTE_BKEY = new byte[] { (byte) 1 };
    private final String VALUE = "VALUE";
    private final byte[] EFLAG = "eflag".getBytes();

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        mc.delete(KEY).get();
        Assert.assertNull(mc.asyncGetAttr(KEY).get());
    }

    @Override
    protected void tearDown() throws Exception {
        mc.delete(KEY).get();
        super.tearDown();
    }

    /** Long bkeys: range 2 keeps at most bkeys within 2 of the max. */
    public void testMaxBkeyRangeTest() {
        try {
            // create with default.
            CollectionAttributes attribute = new CollectionAttributes();

            Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING,
                    attribute).get());

            Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, EFLAG, VALUE,
                    attribute).get());

            // Default maxbkeyrange is 0 (unlimited), byte form unset.
            CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get();
            Assert.assertNull(btreeAttrs.getMaxBkeyRangeByBytes());
            Assert.assertEquals(new Long(0), btreeAttrs.getMaxBkeyRange());

            // change maxbkeyrange
            attribute.setMaxBkeyRange(2L);
            Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

            // BUGFIX: verify the re-read attributes; the original checked
            // the stale btreeAttrs object here instead of the new one.
            CollectionAttributes changedBtreeAttrs = mc.asyncGetAttr(KEY).get();
            Assert.assertNull(changedBtreeAttrs.getMaxBkeyRangeByBytes());
            Assert.assertEquals(new Long(2),
                    changedBtreeAttrs.getMaxBkeyRange());

            // Bkeys 1..2 span a range of 1: both fit.
            Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 1, EFLAG, VALUE,
                    attribute).get());

            Map<Long, Element<Object>> map = mc.asyncBopGet(KEY, 0, 10,
                    ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get();
            Assert.assertEquals(2, map.size());
            Assert.assertTrue(map.containsKey(BKEY));
            Assert.assertTrue(map.containsKey(BKEY + 1));

            // Bkeys 1..3 span exactly the allowed range of 2: all fit.
            Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 2, EFLAG, VALUE,
                    attribute).get());

            Map<Long, Element<Object>> map2 = mc.asyncBopGet(KEY, 0, 10,
                    ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get();
            Assert.assertEquals(3, map2.size());
            Assert.assertTrue(map2.containsKey(BKEY));
            Assert.assertTrue(map2.containsKey(BKEY + 1));
            Assert.assertTrue(map2.containsKey(BKEY + 2));

            // Bkey 4 pushes the window; the smallest bkey is trimmed.
            Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 3, EFLAG, VALUE,
                    attribute).get());

            Map<Long, Element<Object>> map3 = mc.asyncBopGet(KEY, 0, 10,
                    ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get();
            Assert.assertEquals(3, map3.size());
            Assert.assertTrue(map3.containsKey(BKEY + 1));
            Assert.assertTrue(map3.containsKey(BKEY + 2));
            Assert.assertTrue(map3.containsKey(BKEY + 3));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }

    /** Byte-array bkeys: maxbkeyrange round-trips through setattr. */
    public void testMaxBkeyRangeByBytesTest() {
        try {
            // create with default.
            CollectionAttributes attribute = new CollectionAttributes();

            Assert.assertTrue(mc.asyncBopCreate(KEY, ElementValueType.STRING,
                    attribute).get());

            Assert.assertTrue(mc.asyncBopInsert(KEY, BYTE_BKEY, EFLAG, VALUE,
                    attribute).get());

            // get current maxbkeyrange
            CollectionAttributes btreeAttrs = mc.asyncGetAttr(KEY).get();
            Assert.assertNull(btreeAttrs.getMaxBkeyRangeByBytes());

            // change maxbkeyrange
            attribute.setMaxBkeyRangeByBytes(new byte[] { (byte) 2 });
            Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

            // get current maxbkeyrange
            CollectionAttributes changedBtreeAttrs = mc.asyncGetAttr(KEY).get();
            Assert.assertTrue(Arrays.equals(new byte[] { (byte) 2 },
                    changedBtreeAttrs.getMaxBkeyRangeByBytes()));
        } catch (Exception e) {
            Assert.fail(e.getMessage());
        }
    }
}
+ */ +package net.spy.memcached.collection.attribute; + +import net.spy.memcached.collection.BaseIntegrationTest; + +public class SetAttrTest extends BaseIntegrationTest { + + public void testSetAttr() throws Exception { + + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/attribute/UnReadableBTreeTest.java b/src/test/manual/net/spy/memcached/collection/attribute/UnReadableBTreeTest.java new file mode 100644 index 000000000..8dde9f37f --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/attribute/UnReadableBTreeTest.java @@ -0,0 +1,130 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.Map;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.internal.CollectionFuture;

/**
 * Manual integration test for the "readable" attribute of a b+tree item.
 *
 * Verifies that elements of a b+tree created with readable=false cannot be
 * fetched (the server answers UNREADABLE) until the attribute is flipped to
 * readable=true via setattr, and that a readable b+tree behaves normally.
 *
 * NOTE(review): generic type parameters were reconstructed from the Arcus
 * API (they were stripped in the reviewed diff) — confirm against
 * ArcusClientIF.asyncBopGet.
 */
public class UnReadableBTreeTest extends BaseIntegrationTest {

	private final String KEY = this.getClass().getSimpleName();
	private final String VALUE = "VALUE";
	private final long BKEY = 10L;

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		// Start from a clean slate: the key must not exist.
		mc.delete(KEY).get();
		Assert.assertNull(mc.asyncGetAttr(KEY).get());
	}

	@Override
	protected void tearDown() throws Exception {
		mc.delete(KEY).get();
		super.tearDown();
	}

	public void testCreateUnreadableBTreeTest() {
		try {
			// Create an unreadable empty b+tree.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(false);

			Boolean insertResult = mc.asyncBopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertFalse(attr.getReadable());

			// Inserting into an unreadable b+tree is allowed.
			Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, null)
					.get());

			// Reading must fail with UNREADABLE while readable=false.
			CollectionFuture<Map<Long, Element<Object>>> f = mc.asyncBopGet(
					KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, false, false);
			Assert.assertNull(f.get());
			Assert.assertEquals("UNREADABLE", f.getOperationStatus()
					.getMessage());

			// Flip the attribute to readable.
			attribute.setReadable(true);
			Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

			// The same read must now succeed.
			f = mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER,
					false, false);
			Map<Long, Element<Object>> map = f.get();

			Assert.assertNotNull(map);
			Assert.assertEquals(VALUE, map.get(BKEY).getValue());
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testCreateReadableBTreeTest() {
		try {
			// Create a readable empty b+tree.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(true);

			Boolean insertResult = mc.asyncBopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertTrue(attr.getReadable());

			// Insert an element.
			Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, null)
					.get());

			// A readable b+tree serves reads immediately.
			CollectionFuture<Map<Long, Element<Object>>> f = mc.asyncBopGet(
					KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, false, false);

			Map<Long, Element<Object>> map = f.get();
			Assert.assertNotNull(map);
			Assert.assertEquals(VALUE, map.get(BKEY).getValue());
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			// Consistent with the unreadable case: keep the stack trace.
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.Map;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.internal.CollectionFuture;

/**
 * Manual integration test for the "readable" attribute of an extended
 * (byte-array bkey) b+tree item.
 *
 * Same scenario as {@code UnReadableBTreeTest} but using byte-array bkeys
 * and the ranged asyncBopGet(from, to, filter, offset, count, ...) API.
 *
 * NOTE(review): generic type parameters were reconstructed from the Arcus
 * API (stripped in the reviewed diff) — confirm against ArcusClientIF.
 */
public class UnReadableExtendedBTreeTest extends BaseIntegrationTest {

	private final String KEY = this.getClass().getSimpleName();
	private final String VALUE = "VALUE";
	private final byte[] BKEY = new byte[] { (byte) 1 };

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		// Start from a clean slate: the key must not exist.
		mc.delete(KEY).get();
		Assert.assertNull(mc.asyncGetAttr(KEY).get());
	}

	@Override
	protected void tearDown() throws Exception {
		mc.delete(KEY).get();
		super.tearDown();
	}

	public void testCreateUnreadableExtendedBTreeTest() {
		try {
			// Create an unreadable empty b+tree.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(false);

			Boolean insertResult = mc.asyncBopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertFalse(attr.getReadable());

			// Inserting into an unreadable b+tree is allowed.
			Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, null)
					.get());

			// Reading must fail with UNREADABLE while readable=false.
			CollectionFuture<Map<ByteArrayBKey, Element<Object>>> f = mc
					.asyncBopGet(KEY, BKEY, BKEY,
							ElementFlagFilter.DO_NOT_FILTER, 0, 10, false,
							false);
			Assert.assertNull(f.get());
			Assert.assertEquals("UNREADABLE", f.getOperationStatus()
					.getMessage());

			// Flip the attribute to readable.
			attribute.setReadable(true);
			Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

			// The same read must now succeed.
			f = mc.asyncBopGet(KEY, BKEY, BKEY,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false);
			Map<ByteArrayBKey, Element<Object>> map = f.get();

			Assert.assertNotNull(map);
			Assert.assertEquals(VALUE, map.get(new ByteArrayBKey(BKEY))
					.getValue());
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testCreateReadableExtendedBTreeTest() {
		try {
			// Create a readable empty b+tree.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(true);

			Boolean insertResult = mc.asyncBopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertTrue(attr.getReadable());

			// Insert an element.
			Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, null)
					.get());

			// A readable b+tree serves reads immediately.
			CollectionFuture<Map<ByteArrayBKey, Element<Object>>> f = mc
					.asyncBopGet(KEY, BKEY, BKEY,
							ElementFlagFilter.DO_NOT_FILTER, 0, 10, false,
							false);

			Map<ByteArrayBKey, Element<Object>> map = f.get();
			Assert.assertNotNull(map);
			Assert.assertEquals(VALUE, map.get(new ByteArrayBKey(BKEY))
					.getValue());
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			// Consistent with the unreadable case: keep the stack trace.
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.List;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.internal.CollectionFuture;

/**
 * Manual integration test for the "readable" attribute of a list item.
 *
 * Verifies that elements of a list created with readable=false cannot be
 * fetched (UNREADABLE) until setattr flips the attribute, and that a
 * readable list behaves normally.
 *
 * NOTE(review): generic type parameters were reconstructed from the Arcus
 * API (stripped in the reviewed diff) — confirm against ArcusClientIF.
 */
public class UnReadableListTest extends BaseIntegrationTest {

	private final String KEY = this.getClass().getSimpleName();
	private final String VALUE = "VALUE";
	private final int INDEX = 0;

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		// Start from a clean slate: the key must not exist.
		mc.delete(KEY).get();
		Assert.assertNull(mc.asyncGetAttr(KEY).get());
	}

	@Override
	protected void tearDown() throws Exception {
		mc.delete(KEY).get();
		super.tearDown();
	}

	public void testCreateUnreadableListTest() {
		try {
			// Create an unreadable empty list.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(false);

			Boolean insertResult = mc.asyncLopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertFalse(attr.getReadable());

			// Inserting into an unreadable list is allowed.
			Assert.assertTrue(mc.asyncLopInsert(KEY, INDEX, VALUE,
					new CollectionAttributes()).get());

			// Reading must fail with UNREADABLE while readable=false.
			CollectionFuture<List<Object>> f = mc.asyncLopGet(KEY, INDEX,
					false, false);
			Assert.assertNull(f.get());
			Assert.assertEquals("UNREADABLE", f.getOperationStatus()
					.getMessage());

			// Flip the attribute to readable.
			attribute.setReadable(true);
			Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

			// The same read must now succeed.
			f = mc.asyncLopGet(KEY, INDEX, false, false);
			List<Object> list = f.get();

			Assert.assertNotNull(list);
			Assert.assertEquals(VALUE, list.get(INDEX));
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			// Keep the stack trace; fail(e.getMessage()) alone hides it.
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testCreateReadableListTest() {
		try {
			// Create a readable empty list.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(true);

			Boolean insertResult = mc.asyncLopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertTrue(attr.getReadable());

			// Insert an element.
			Assert.assertTrue(mc.asyncLopInsert(KEY, INDEX, VALUE,
					new CollectionAttributes()).get());

			// A readable list serves reads immediately.
			CollectionFuture<List<Object>> f = mc.asyncLopGet(KEY, INDEX,
					false, false);

			List<Object> list = f.get();
			Assert.assertNotNull(list);
			Assert.assertEquals(VALUE, list.get(INDEX));
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.attribute;

import java.util.Set;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.ElementValueType;
import net.spy.memcached.internal.CollectionFuture;

/**
 * Manual integration test for the "readable" attribute of a set item.
 *
 * Verifies that elements of a set created with readable=false cannot be
 * fetched (UNREADABLE) until setattr flips the attribute, and that a
 * readable set behaves normally.
 *
 * NOTE(review): generic type parameters were reconstructed from the Arcus
 * API (stripped in the reviewed diff) — confirm against ArcusClientIF.
 * Also note INDEX is passed as the element *count* of asyncSopGet (0 means
 * "all"), not a positional index — the name is kept for API compatibility
 * of the fixture but the usage is count semantics.
 */
public class UnReadableSetTest extends BaseIntegrationTest {

	private final String KEY = this.getClass().getSimpleName();
	private final String VALUE = "VALUE";
	private final int INDEX = 0;

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		// Start from a clean slate: the key must not exist.
		mc.delete(KEY).get();
		Assert.assertNull(mc.asyncGetAttr(KEY).get());
	}

	@Override
	protected void tearDown() throws Exception {
		mc.delete(KEY).get();
		super.tearDown();
	}

	public void testCreateUnreadableSetTest() {
		try {
			// Create an unreadable empty set.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(false);

			Boolean insertResult = mc.asyncSopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertFalse(attr.getReadable());

			// Inserting into an unreadable set is allowed.
			Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE,
					new CollectionAttributes()).get());

			// Reading must fail with UNREADABLE while readable=false.
			CollectionFuture<Set<Object>> f = mc.asyncSopGet(KEY, INDEX, false,
					false);
			Assert.assertNull(f.get());
			Assert.assertEquals("UNREADABLE", f.getOperationStatus()
					.getMessage());

			// Flip the attribute to readable.
			attribute.setReadable(true);
			Assert.assertTrue(mc.asyncSetAttr(KEY, attribute).get());

			// The same read must now succeed.
			f = mc.asyncSopGet(KEY, INDEX, false, false);
			Set<Object> set = f.get();

			Assert.assertNotNull(set);
			Assert.assertTrue(set.contains(VALUE));
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testCreateReadableSetTest() {
		try {
			// Create a readable empty set.
			CollectionAttributes attribute = new CollectionAttributes();
			attribute.setReadable(true);

			Boolean insertResult = mc.asyncSopCreate(KEY,
					ElementValueType.STRING, attribute).get();
			Assert.assertTrue(insertResult);

			// Check the attributes the server reports for the new item.
			CollectionAttributes attr = mc.asyncGetAttr(KEY).get();

			Assert.assertEquals(Long.valueOf(0), attr.getCount());
			Assert.assertEquals(Long.valueOf(4000), attr.getMaxCount());
			Assert.assertEquals(Integer.valueOf(0), attr.getExpireTime());
			Assert.assertTrue(attr.getReadable());

			// Insert an element.
			Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE,
					new CollectionAttributes()).get());

			// A readable set serves reads immediately.
			CollectionFuture<Set<Object>> f = mc.asyncSopGet(KEY, INDEX, false,
					false);

			Set<Object> set = f.get();
			Assert.assertNotNull(set);
			Assert.assertTrue(set.contains(VALUE));
			Assert.assertEquals("END", f.getOperationStatus().getMessage());
		} catch (Exception e) {
			// Consistent with the unreadable case: keep the stack trace.
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;

/**
 * Manual integration test for b+tree element deletion (bop delete):
 * missing key, out-of-range bkey, best-effort range delete, and the
 * drop-if-empty option.
 */
public class BopDeleteTest extends BaseIntegrationTest {

	// Fixed: was "UnReadableBTreeTest" (copy-paste), which collided with
	// the key used by that test class when both run against one cluster.
	private String key = "BopDeleteTest";

	private Long[] items9 = { 0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L };

	protected void setUp() throws Exception {
		super.setUp();

		// Clear any leftover elements, then seed 9 items with bkeys 0..8.
		mc.asyncBopDelete(key, 0, 4000, ElementFlagFilter.DO_NOT_FILTER, 0,
				false).get(1000, TimeUnit.MILLISECONDS);
		addToBTree(key, items9);

		CollectionAttributes attrs = new CollectionAttributes();
		attrs.setMaxCount(10);
		assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS));
	}

	protected void tearDown() throws Exception {
		// Clean up the fixture key so later tests start from a known state
		// (previously the key was left behind).
		mc.delete(key).get(1000, TimeUnit.MILLISECONDS);
		super.tearDown();
	}

	public void testBopDelete_NoKey() throws Exception {
		// Deleting from a nonexistent key must report failure.
		assertFalse(mc.asyncBopDelete("no_key", 0,
				ElementFlagFilter.DO_NOT_FILTER, false).get(1000,
				TimeUnit.MILLISECONDS));
	}

	public void testBopDelete_OutOfRange() throws Exception {
		// bkey 11 does not exist (items are 0..8): delete must fail.
		assertFalse(mc.asyncBopDelete(key, 11, ElementFlagFilter.DO_NOT_FILTER,
				false).get(1000, TimeUnit.MILLISECONDS));
	}

	public void testBopDelete_DeleteByBestEffort() throws Exception {
		// Delete the bkey range 2..11; only 2..8 exist, and by the
		// "best effort" rule the existing subset is deleted successfully.
		assertTrue(mc.asyncBopDelete(key, 2, 11,
				ElementFlagFilter.DO_NOT_FILTER, 0, false).get(1000,
				TimeUnit.MILLISECONDS));

		// Fetch the remainder (result intentionally unchecked; see note).
		mc.asyncBopGet(key, 0, 100, ElementFlagFilter.DO_NOT_FILTER, 0, 100,
				false, false).get(1000, TimeUnit.MILLISECONDS);

		// NOTE(review): the original assertions on the surviving elements
		// were commented out; the remaining-elements check (bkeys 0..1
		// should survive) is still unverified.
	}

	public void testBopDelete_DeletedDropped() throws Exception {
		// Delete every element with dropIfEmpty=true: the b+tree itself
		// must be dropped, so getattr finds nothing.
		assertTrue(mc.asyncBopDelete(key, 0, items9.length,
				ElementFlagFilter.DO_NOT_FILTER, 0, true).get(1000,
				TimeUnit.MILLISECONDS));

		CollectionAttributes attrs = mc.asyncGetAttr(key).get(1000,
				TimeUnit.MILLISECONDS);
		assertNull(attrs);
	}

	public void testBopDeleteWithSingleBkey() throws Exception {
		// Byte-array bkey variant: insert one element, delete it with
		// dropIfEmpty=true, and verify the item is gone.
		mc.delete(key).get();

		byte[] bkey = new byte[] { (byte) 1 };
		Assert.assertTrue(mc.asyncBopInsert(key, bkey, null, "value",
				new CollectionAttributes()).get());

		Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(key, bkey,
				ElementFlagFilter.DO_NOT_FILTER, false, false).get();
		Assert.assertNotNull(map);
		Assert.assertEquals(1, map.size());
		Assert.assertTrue(mc.asyncBopDelete(key, bkey,
				ElementFlagFilter.DO_NOT_FILTER, true).get());

		Assert.assertNull(mc.asyncBopGet(key, bkey,
				ElementFlagFilter.DO_NOT_FILTER, false, false).get());

		mc.delete(key).get();
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree;

import java.util.concurrent.TimeUnit;

import net.spy.memcached.collection.BTreeOrder;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.internal.CollectionFuture;

/**
 * Manual integration test for the b+tree "bop position" command: ascending
 * and descending positions for long and byte-array bkeys, plus every
 * unsuccessful response code (NOT_FOUND, UNREADABLE, BKEY_MISMATCH,
 * TYPE_MISMATCH, NOT_FOUND_ELEMENT).
 */
public class BopFindPositionTest extends BaseIntegrationTest {

	private String key = "BopFindPositionTest";
	private String invalidKey = "InvalidBopFindPositionTest";
	private String kvKey = "KvBopFindPositionTest";

	private long[] longBkeys = { 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L,
			19L };
	private byte[][] byteArrayBkeys = { new byte[] { 10 }, new byte[] { 11 },
			new byte[] { 12 }, new byte[] { 13 }, new byte[] { 14 },
			new byte[] { 15 }, new byte[] { 16 }, new byte[] { 17 },
			new byte[] { 18 }, new byte[] { 19 } };

	protected void setUp() throws Exception {
		super.setUp();
		mc.delete(key).get(1000, TimeUnit.MILLISECONDS);
	}

	protected void tearDown() throws Exception {
		super.tearDown();
	}

	public void testLongBKeyAsc() throws Exception {
		// Insert ascending long bkeys 10..19.
		CollectionAttributes attrs = new CollectionAttributes();
		for (long each : longBkeys) {
			mc.asyncBopInsert(key, each, null, "val", attrs).get();
		}

		// ASC position of the i-th inserted bkey is i.
		for (int i = 0; i < longBkeys.length; i++) {
			CollectionFuture<Integer> f = mc.asyncBopFindPosition(key,
					longBkeys[i], BTreeOrder.ASC);
			Integer position = f.get();
			assertNotNull(position);
			assertEquals(CollectionResponse.OK, f.getOperationStatus()
					.getResponse());
			assertEquals(i, position.intValue());
		}
	}

	public void testLongBKeyDesc() throws Exception {
		// Insert ascending long bkeys 10..19.
		CollectionAttributes attrs = new CollectionAttributes();
		for (long each : longBkeys) {
			mc.asyncBopInsert(key, each, null, "val", attrs).get();
		}

		// DESC position is counted from the largest bkey.
		for (int i = 0; i < longBkeys.length; i++) {
			CollectionFuture<Integer> f = mc.asyncBopFindPosition(key,
					longBkeys[i], BTreeOrder.DESC);
			Integer position = f.get();
			assertNotNull(position);
			assertEquals(CollectionResponse.OK, f.getOperationStatus()
					.getResponse());
			assertEquals("invalid position", longBkeys.length - i - 1,
					position.intValue());
		}
	}

	public void testByteArrayBKeyAsc() throws Exception {
		// Insert ascending byte-array bkeys {10}..{19}.
		CollectionAttributes attrs = new CollectionAttributes();
		for (byte[] each : byteArrayBkeys) {
			mc.asyncBopInsert(key, each, null, "val", attrs).get();
		}

		// ASC position of the i-th inserted bkey is i.
		for (int i = 0; i < byteArrayBkeys.length; i++) {
			CollectionFuture<Integer> f = mc.asyncBopFindPosition(key,
					byteArrayBkeys[i], BTreeOrder.ASC);
			Integer position = f.get();
			assertNotNull(position);
			assertEquals(CollectionResponse.OK, f.getOperationStatus()
					.getResponse());
			assertEquals(i, position.intValue());
		}
	}

	public void testByteArrayBKeyDesc() throws Exception {
		// Insert ascending byte-array bkeys {10}..{19}.
		CollectionAttributes attrs = new CollectionAttributes();
		for (byte[] each : byteArrayBkeys) {
			mc.asyncBopInsert(key, each, null, "val", attrs).get();
		}

		// DESC position is counted from the largest bkey.
		for (int i = 0; i < byteArrayBkeys.length; i++) {
			CollectionFuture<Integer> f = mc.asyncBopFindPosition(key,
					byteArrayBkeys[i], BTreeOrder.DESC);
			Integer position = f.get();
			assertNotNull(position);
			assertEquals(CollectionResponse.OK, f.getOperationStatus()
					.getResponse());
			// Fixed: expected value used longBkeys.length (copy-paste from
			// the long-bkey test); it must be derived from byteArrayBkeys.
			assertEquals("invalid position", byteArrayBkeys.length - i - 1,
					position.intValue());
		}
	}

	public void testUnsuccessfulResponses() throws Exception {
		mc.delete(invalidKey).get();
		mc.delete(kvKey).get();

		// Insert into an unreadable b+tree (readable=false).
		CollectionAttributes attrs = new CollectionAttributes();
		attrs.setReadable(false);
		for (byte[] each : byteArrayBkeys) {
			mc.asyncBopInsert(key, each, null, "val", attrs).get();
		}

		// A plain key-value item, to trigger TYPE_MISMATCH below.
		mc.set(kvKey, 0, "value").get();

		CollectionFuture<Integer> f = null;
		Integer position = null;

		// NOT_FOUND: key does not exist.
		f = mc.asyncBopFindPosition(invalidKey, byteArrayBkeys[0],
				BTreeOrder.ASC);
		position = f.get();
		assertNull(position);
		assertEquals(CollectionResponse.NOT_FOUND, f.getOperationStatus()
				.getResponse());

		// UNREADABLE: b+tree exists but readable=false.
		f = mc.asyncBopFindPosition(key, byteArrayBkeys[0], BTreeOrder.ASC);
		position = f.get();
		assertNull(position);
		assertEquals(CollectionResponse.UNREADABLE, f.getOperationStatus()
				.getResponse());

		attrs.setReadable(true);
		mc.asyncSetAttr(key, attrs).get();

		// BKEY_MISMATCH: long bkey against a byte-array-keyed b+tree.
		f = mc.asyncBopFindPosition(key, longBkeys[0], BTreeOrder.ASC);
		position = f.get();
		assertNull(position);
		assertEquals(CollectionResponse.BKEY_MISMATCH, f.getOperationStatus()
				.getResponse());

		// TYPE_MISMATCH: the target item is a key-value item, not a b+tree.
		f = mc.asyncBopFindPosition(kvKey, byteArrayBkeys[0], BTreeOrder.ASC);
		position = f.get();
		assertNull(position);
		assertEquals(CollectionResponse.TYPE_MISMATCH, f.getOperationStatus()
				.getResponse());

		// NOT_FOUND_ELEMENT: bkey {64} was never inserted.
		byte[] invalidBkey = new byte[] { 64 };
		f = mc.asyncBopFindPosition(key, invalidBkey, BTreeOrder.ASC);
		position = f.get();
		assertNull(position);
		assertEquals(CollectionResponse.NOT_FOUND_ELEMENT, f
				.getOperationStatus().getResponse());
	}

}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;

import junit.framework.Assert;
import net.spy.memcached.collection.BTreeElement;
import net.spy.memcached.collection.BTreeGetResult;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.internal.CollectionGetBulkFuture;

/**
 * Manual integration test for the multi-key b+tree read (bop get bulk):
 * the all-hit / all-miss / mixed cases, argument validation, and the
 * per-key response codes (UNREADABLE, NOT_FOUND_ELEMENT, TYPE_MISMATCH,
 * BKEY_MISMATCH).
 *
 * NOTE(review): generic type parameters were reconstructed from the Arcus
 * API (stripped in the reviewed diff) — confirm against ArcusClientIF.
 */
public class BopGetBulkTest extends BaseIntegrationTest {

	// Five keys used by most scenarios.
	private final List<String> keyList = new ArrayList<String>() {
		private static final long serialVersionUID = -4044682425313432602L;
		{
			add("BopGetBulkTest1");
			add("BopGetBulkTest2");
			add("BopGetBulkTest3");
			add("BopGetBulkTest4");
			add("BopGetBulkTest5");
		}
	};

	// 499 keys: exceeds the client's per-request key limit, used to check
	// argument validation.
	private final List<String> keyList2 = new ArrayList<String>() {
		private static final long serialVersionUID = -4044682425313432603L;
		{
			for (int i = 1; i < 500; i++) {
				add("BopGetBulkTest" + i);
			}
		}
	};

	private final byte[] eFlag = { 1, 8, 16, 32, 64 };

	private final String value = String.valueOf(new Random().nextLong());

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		try {
			// Each key gets bkeys 0..2; bkey 1 carries an element flag.
			for (int i = 0; i < keyList.size(); i++) {
				mc.delete(keyList.get(i)).get();
				mc.asyncBopInsert(keyList.get(i), 0, null, value + "0",
						new CollectionAttributes()).get();
				mc.asyncBopInsert(keyList.get(i), 1, eFlag, value + "1",
						new CollectionAttributes()).get();
				mc.asyncBopInsert(keyList.get(i), 2, null, value + "2",
						new CollectionAttributes()).get();
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testGetBulkLongBkeyGetAll() {
		try {
			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = mc
					.asyncBopGetBulk(keyList, 0, 10, filter, 0, 10);

			Map<String, BTreeGetResult<Long, Object>> results = f.get(1000L,
					TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<Long, Object> r = results.get(keyList.get(i));

				// Every key must carry a per-key response.
				Assert.assertNotNull(r.getCollectionResponse().getResponse());

				// All three elements come back; bkey 1 keeps its eflag.
				Map<Long, BTreeElement<Long, Object>> elements = r
						.getElements();

				Assert.assertEquals(3, elements.size());

				Assert.assertTrue(Arrays.equals(eFlag, elements.get(1L)
						.getEflag()));

				for (long j = 0; j < elements.size(); j++) {
					Assert.assertEquals(j, (long) elements.get(j).getBkey());
					Assert.assertEquals(value + j, (String) elements.get(j)
							.getValue());
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testGetBulkNotFoundAll() {
		try {
			// Remove every key: each per-key result must be NOT_FOUND.
			for (int i = 0; i < keyList.size(); i++) {
				mc.delete(keyList.get(i)).get();
			}

			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = mc
					.asyncBopGetBulk(keyList, 0, 10, filter, 0, 10);

			Map<String, BTreeGetResult<Long, Object>> results = f.get(1000L,
					TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<Long, Object> r = results.get(keyList.get(i));

				Assert.assertEquals(CollectionResponse.NOT_FOUND, r
						.getCollectionResponse().getResponse());
				Assert.assertNull(r.getElements());
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testGetBulkNotFoundMixed() {
		try {
			// Delete the even-indexed keys only: the response is per key.
			for (int i = 0; i < keyList.size(); i++) {
				if (i % 2 == 0)
					mc.delete(keyList.get(i)).get();
			}

			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = mc
					.asyncBopGetBulk(keyList, 0, 10, filter, 0, 10);

			Map<String, BTreeGetResult<Long, Object>> results = f.get(1000L,
					TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			// Deleted keys report NOT_FOUND; surviving keys report OK with
			// all three elements intact.
			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<Long, Object> r = results.get(keyList.get(i));

				if (i % 2 == 0) {
					Assert.assertEquals(CollectionResponse.NOT_FOUND, r
							.getCollectionResponse().getResponse());
				} else {
					Assert.assertEquals(CollectionResponse.OK, r
							.getCollectionResponse().getResponse());

					Map<Long, BTreeElement<Long, Object>> elements = r
							.getElements();

					Assert.assertEquals(3, elements.size());

					Assert.assertTrue(Arrays.equals(eFlag, elements.get(1L)
							.getEflag()));

					for (long j = 0; j < elements.size(); j++) {
						Assert.assertEquals(j, (long) elements.get(j).getBkey());
						Assert.assertEquals(value + j, (String) elements.get(j)
								.getValue());
					}
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testErrorArguments() {
		try {
			Map<String, BTreeGetResult<Long, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = null;

			// Empty key list: no request is sent, the result is empty.
			f = mc.asyncBopGetBulk(new ArrayList<String>(), 0, 10,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);
			Assert.assertEquals(0, results.size());

			// Too many keys: must be rejected with IllegalArgumentException.
			// Fixed: the original left the catch body empty AND did not
			// fail() after the call, so the test passed even when no
			// exception was thrown.
			try {
				f = mc.asyncBopGetBulk(keyList2, 0, 10,
						ElementFlagFilter.DO_NOT_FILTER, 0, 10);
				f.get(1000L, TimeUnit.MILLISECONDS);
				Assert.fail("expected IllegalArgumentException for too many keys");
			} catch (IllegalArgumentException e) {
				// expected
			}

			// Too large a count: must also be rejected.
			try {
				f = mc.asyncBopGetBulk(keyList, 0, 10,
						ElementFlagFilter.DO_NOT_FILTER, 0, 1000);
				f.get(1000L, TimeUnit.MILLISECONDS);
				Assert.fail("expected IllegalArgumentException for too large a count");
			} catch (IllegalArgumentException e) {
				// expected
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	public void testUnreadable() {
		try {
			Map<String, BTreeGetResult<Long, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = null;

			// Recreate the first key as an unreadable b+tree.
			mc.delete(keyList.get(0)).get();
			CollectionAttributes attrs = new CollectionAttributes();
			attrs.setReadable(false);
			mc.asyncBopInsert(keyList.get(0), 0, null, value + "0", attrs)
					.get();

			f = mc.asyncBopGetBulk(keyList, 0, 10,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			// Only that key reports UNREADABLE.
			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("UNREADABLE", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	public void testNotFoundElement() {
		try {
			Map<String, BTreeGetResult<Long, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = null;

			mc.delete(keyList.get(0)).get();
			mc.asyncBopInsert(keyList.get(0), 0, null, value + "0",
					new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), 1, eFlag, value + "1",
					new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), 2, null, value + "2",
					new CollectionAttributes()).get();

			// Query a bkey range (1000..10000) no key has elements in.
			f = mc.asyncBopGetBulk(keyList, 1000, 10000,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			for (int i = 0; i < results.size(); i++) {
				Assert.assertEquals("NOT_FOUND_ELEMENT",
						results.get(keyList.get(i)).getCollectionResponse()
								.getMessage());
			}
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	public void testTypeMismatch() {
		try {
			Map<String, BTreeGetResult<Long, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = null;

			// Replace the first key with a plain key-value item.
			mc.delete(keyList.get(0)).get();
			mc.set(keyList.get(0), 10, "V").get(200L, TimeUnit.MILLISECONDS);

			f = mc.asyncBopGetBulk(keyList, 0, 10,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("TYPE_MISMATCH", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	public void testBKeyMismatch() {
		try {
			Map<String, BTreeGetResult<Long, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<Long, Object>>> f = null;

			// Rebuild the first key with byte-array bkeys, then query the
			// bulk API with a long bkey range: that key must answer
			// BKEY_MISMATCH.
			mc.delete(keyList.get(0)).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 0 }, null,
					value + "0", new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 1 }, eFlag,
					value + "0", new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 2 }, null,
					value + "0", new CollectionAttributes()).get();

			f = mc.asyncBopGetBulk(keyList, 0, 10,
					ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("BKEY_MISMATCH", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}
}
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Arrays; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BTreeOrder; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.Element; +import net.spy.memcached.internal.CollectionFuture; + +public class BopGetByPositionTest extends BaseIntegrationTest { + + private String key = "BopGetByPositionTest"; + private String invalidKey = "InvalidBopGetByPositionTest"; + private String kvKey = "KvBopGetByPositionTest"; + + private long[] longBkeys = { 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, + 19L }; + private byte[][] byteArrayBkeys = { new byte[] { 10 }, new byte[] { 11 }, + new byte[] { 12 }, new byte[] { 13 }, new byte[] { 14 }, + new byte[] { 15 }, new byte[] { 16 }, new byte[] { 17 }, + new byte[] { 18 }, new byte[] { 19 } }; + + protected void setUp() throws Exception { + super.setUp(); + mc.delete(key).get(1000, TimeUnit.MILLISECONDS); + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testLongBKeySingle() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, 
"val", attrs).get(); + } + + // bop gbp + int pos = 5; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, pos); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(1, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%d, value:%s", + // each.getKey(), each.getValue().getLongBkey(), each + // .getValue().getValue())); + assertEquals("invalid index", pos, each.getKey().intValue()); + assertEquals("invalid bkey", longBkeys[pos], each.getValue() + .getLongBkey()); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testLongBKeyMultiple() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // bop gbp + int posFrom = 5; + int posTo = 8; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, posFrom, posTo); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(4, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + int count = 0; + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%d, value:%s", + // each.getKey(), each.getValue().getLongBkey(), each + // .getValue().getValue())); + int currPos = posFrom + count++; + assertEquals("invalid index", currPos, each.getKey().intValue()); + assertEquals("invalid bkey", longBkeys[currPos], each.getValue() + .getLongBkey()); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testLongBKeyMultipleReversed() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // bop gbp + int 
posFrom = 8; + int posTo = 5; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, posFrom, posTo); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(4, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + int count = 0; + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%d, value:%s", + // each.getKey(), each.getValue().getLongBkey(), each + // .getValue().getValue())); + int currPos = posFrom - count++; + assertEquals("invalid index", currPos, each.getKey().intValue()); + assertEquals("invalid bkey", longBkeys[currPos], each.getValue() + .getLongBkey()); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testByteArrayBKeySingle() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // bop gbp + int pos = 5; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, pos); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(1, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%s, value:%s", + // each.getKey(), each.getValue().getBkeyByHex(), each + // .getValue().getValue())); + assertEquals("invalid index", pos, each.getKey().intValue()); + assertTrue("invalid bkey", Arrays.equals(byteArrayBkeys[pos], each + .getValue().getByteArrayBkey())); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testByteArrayBKeyMultiple() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // bop gbp + int posFrom = 
5; + int posTo = 8; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, posFrom, posTo); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(4, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + int count = 0; + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%s, value:%s", + // each.getKey(), each.getValue().getBkeyByHex(), each + // .getValue().getValue())); + int currPos = posFrom + count++; + assertEquals("invalid index", currPos, each.getKey().intValue()); + assertTrue("invalid bkey", Arrays.equals(byteArrayBkeys[currPos], + each.getValue().getByteArrayBkey())); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testByteArrayBKeyMultipleReversed() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // bop gbp + int posFrom = 8; + int posTo = 5; + CollectionFuture>> f = mc + .asyncBopGetByPosition(key, BTreeOrder.ASC, posFrom, posTo); + Map> result = f.get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(4, result.size()); + assertEquals(CollectionResponse.END, f.getOperationStatus() + .getResponse()); + + int count = 0; + for (Entry> each : result.entrySet()) { + // System.out.println(String.format("index:%d, bkey:%s, value:%s", + // each.getKey(), each.getValue().getBkeyByHex(), each + // .getValue().getValue())); + int currPos = posFrom - count++; + assertEquals("invalid index", currPos, each.getKey().intValue()); + assertTrue("invalid bkey", Arrays.equals(byteArrayBkeys[currPos], + each.getValue().getByteArrayBkey())); + assertEquals("invalid value", "val", each.getValue().getValue()); + } + } + + public void testUnsuccessfulResponses() throws Exception { + mc.delete(invalidKey).get(); + mc.delete(kvKey).get(); + + // insert + 
CollectionAttributes attrs = new CollectionAttributes(); + attrs.setReadable(false); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // set a test key + mc.set(kvKey, 0, "value").get(); + + CollectionFuture>> f = null; + Map> result = null; + + // NOT_FOUND + f = mc.asyncBopGetByPosition(invalidKey, BTreeOrder.ASC, 0); + result = f.get(); + assertNull(result); + assertEquals(CollectionResponse.NOT_FOUND, f.getOperationStatus() + .getResponse()); + + // UNREADABLE + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0); + result = f.get(); + assertNull(result); + assertEquals(CollectionResponse.UNREADABLE, f.getOperationStatus() + .getResponse()); + + attrs.setReadable(true); + mc.asyncSetAttr(key, attrs).get(); + + // TYPE_MISMATCH + f = mc.asyncBopGetByPosition(kvKey, BTreeOrder.ASC, 0); + result = f.get(); + assertNull(result); + assertEquals(CollectionResponse.TYPE_MISMATCH, f.getOperationStatus() + .getResponse()); + + // NOT_FOUND_ELEMENT + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 2000); + result = f.get(); + assertNotNull(result); + assertEquals(0, result.size()); + assertEquals(CollectionResponse.NOT_FOUND_ELEMENT, f + .getOperationStatus().getResponse()); + + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 1000, 2000); + result = f.get(); + assertNotNull(result); + assertEquals(0, result.size()); + assertEquals(CollectionResponse.NOT_FOUND_ELEMENT, f + .getOperationStatus().getResponse()); + } + + public void testAscDesc() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + CollectionFuture>> f = null; + Map> result = null; + + // 1. ASC 5 20 5 6 7 8 9 + // 2. ASC 20 5 9 8 7 6 5 + // 3. DESC 5 20 5 6 7 8 9 + // 4. DESC 20 5 9 8 7 6 5 + int prevPos = 0; + + // case 1. 
+ prevPos = Integer.MIN_VALUE; + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 5, 20); + result = f.get(); + assertNotNull(result); + assertEquals(5, result.size()); + System.out.println(result.keySet()); + + for (Entry> each : result.entrySet()) { + int currPos = each.getKey(); + assertTrue("positions are not in ascending order", + currPos > prevPos); + prevPos = currPos; + + Element e = each.getValue(); + // assertEquals(longBkeys[currPos], e.getLongBkey()); + System.out.println(currPos + " : " + e.getLongBkey()); + } + + // case 2. + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 20, 5); + result = f.get(); + assertNotNull(result); + assertEquals(5, result.size()); + System.out.println(result.keySet()); + + prevPos = Integer.MAX_VALUE; + for (Entry> each : result.entrySet()) { + int currPos = each.getKey(); + assertTrue("positions are not in descending order", + currPos < prevPos); + prevPos = currPos; + + Element e = each.getValue(); + assertEquals(longBkeys[currPos], e.getLongBkey()); + } + + // case 3. + f = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 5, 20); + result = f.get(); + assertNotNull(result); + assertEquals(5, result.size()); + + prevPos = Integer.MAX_VALUE; + for (Entry> each : result.entrySet()) { + int currPos = longBkeys.length - each.getKey() - 1; + assertTrue("positions are not in ascending order (reversed)", + currPos < prevPos); + prevPos = currPos; + + Element e = each.getValue(); + assertEquals(longBkeys[currPos], e.getLongBkey()); + } + + // case 4. 
+ f = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 20, 5); + result = f.get(); + assertNotNull(result); + assertEquals(5, result.size()); + System.out.println(result.keySet()); + + prevPos = Integer.MIN_VALUE; + for (Entry> each : result.entrySet()) { + int currPos = longBkeys.length - each.getKey() - 1; + assertTrue("positions are not in descending order (reversed)", + currPos > prevPos); + prevPos = currPos; + + Element e = each.getValue(); + assertEquals(longBkeys[currPos], e.getLongBkey()); + } + } + + public void testInvalidArgumentException() throws Exception { + // insert + CollectionAttributes attrs = new CollectionAttributes(); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + CollectionFuture>> f = null; + + // BTreeOrder == null + try { + f = mc.asyncBopGetByPosition(key, null, 5, 20); + f.get(); + fail("This should be an exception"); + } catch (IllegalArgumentException e) { + assertEquals("BTreeOrder should not be null", e.getMessage()); + } + + // Position < 0 + try { + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, -1); + fail("This should be an exception"); + } catch (IllegalArgumentException e) { + assertEquals("Positions should be 0 or positive integer", + e.getMessage()); + } + + try { + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, -1, 20); + fail("This should be an exception"); + } catch (IllegalArgumentException e) { + assertEquals("Positions should be 0 or positive integer", + e.getMessage()); + } + + try { + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0, -1); + fail("This should be an exception"); + } catch (IllegalArgumentException e) { + assertEquals("Positions should be 0 or positive integer", + e.getMessage()); + } + + try { + f = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, -1, -1); + fail("This should be an exception"); + } catch (IllegalArgumentException e) { + assertEquals("Positions should be 0 or positive integer", + e.getMessage()); + } + } + +} diff --git 
a/src/test/manual/net/spy/memcached/collection/btree/BopGetExceptionTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopGetExceptionTest.java new file mode 100644 index 000000000..5cced8f29 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopGetExceptionTest.java @@ -0,0 +1,72 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.transcoders.LongTranscoder; + +public class BopGetExceptionTest extends BaseIntegrationTest { + + private String key = "BopGetExceptionTest"; + + private Long[] items10 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L }; + + protected void setUp() { + try { + super.setUp(); + mc.asyncBopDelete(key, 0, 100, ElementFlagFilter.DO_NOT_FILTER, 0, + true).get(1000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testBopGet_OutOfBound() throws Exception { + // Create a list and add 10 items in it + addToBTree(key, items10); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + 
attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Get item with offset and index + Map> rmap = mc.asyncBopGet(key, 20, 100, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + // The result should not be null + assertNotNull(rmap); + } + + public void testBopGet_NoKey() throws Exception { + // Querying to non-existing collection + Map> rmap = mc.asyncBopGet(key, 0, 100, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + // The result should be null + assertNull(rmap); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopGetIrregularEflagTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopGetIrregularEflagTest.java new file mode 100644 index 000000000..ab6421e78 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopGetIrregularEflagTest.java @@ -0,0 +1,139 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; + +public class BopGetIrregularEflagTest extends BaseIntegrationTest { + + private final String key = "BopGetIrregularEflagTest"; + + private final byte[] eFlag = { 1 }; + + private final Object value = "valvalvalvalvalvalvalvalvalval"; + + public void testGetAll_1() { + try { + mc.delete(key).get(); + mc.asyncBopInsert(key, 0, eFlag, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 1, eFlag, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 2, eFlag, value + "2", + new CollectionAttributes()).get(); + + Map> map = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get( + 100L, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(map); + Assert.assertEquals(3, map.size()); + + for (long i = 0; i < map.size(); i++) { + Object object = map.get(i).getValue(); + Assert.assertEquals(value + String.valueOf(i), object); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testGetAll_2() { + try { + mc.delete(key).get(); + mc.asyncBopInsert(key, 0, null, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 1, eFlag, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 2, eFlag, value + "2", + new CollectionAttributes()).get(); + + Map> map = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get( + 100L, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(map); + Assert.assertEquals(3, map.size()); + + for (long i = 0; i < map.size(); i++) { + Object object = map.get(i).getValue(); + Assert.assertEquals(value + String.valueOf(i), 
object); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testGetAll_3() { + try { + mc.delete(key).get(); + mc.asyncBopInsert(key, 0, eFlag, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 1, null, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 2, eFlag, value + "2", + new CollectionAttributes()).get(); + + Map> map = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get( + 100L, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(map); + Assert.assertEquals(3, map.size()); + + for (long i = 0; i < map.size(); i++) { + Object object = map.get(i).getValue(); + Assert.assertEquals(value + String.valueOf(i), object); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testGetAll_4() { + try { + mc.delete(key).get(); + mc.asyncBopInsert(key, 0, null, value + "0", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 1, null, value + "1", + new CollectionAttributes()).get(); + mc.asyncBopInsert(key, 2, null, value + "2", + new CollectionAttributes()).get(); + + Map> map = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get( + 100L, TimeUnit.MILLISECONDS); + + Assert.assertNotNull(map); + Assert.assertEquals(3, map.size()); + + for (long i = 0; i < map.size(); i++) { + Object object = map.get(i).getValue(); + Assert.assertEquals(value + String.valueOf(i), object); + } + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopGetOffsetSupportTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopGetOffsetSupportTest.java new file mode 100644 index 000000000..eed00dddc --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopGetOffsetSupportTest.java @@ -0,0 +1,105 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.transcoders.LongTranscoder; + +public class BopGetOffsetSupportTest extends BaseIntegrationTest { + + private String key = "BopGetOffsetSupportTest"; + + private Long[] items10 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L }; + + protected void tearDown() { + try { + deleteBTree(key, items10); + super.tearDown(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testBopGetOffset_Normal() throws Exception { + // Create a list and add 10 items in it + addToBTree(key, items10); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Get item with offset and index + Map> rmap = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 5, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(5, rmap.size()); + assertEquals((Long) 10L, rmap.get(9L).getValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + 
TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + + // Get item with offset and index with default transcoder + Map> rmap2 = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 5, 10, false, false).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(5, rmap2.size()); + } + + public void testBopGetOffset_More() throws Exception { + // Create a list and add 10 items in it + addToBTree(key, items10); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + + // Get item with offset and index with default transcoder + int offset = 0; + Map> rmap = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, offset, 10, false, false).get( + 1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + + // offset should be >= 0, but the server doesn't care anyway + offset = -1; + rmap = mc.asyncBopGet(key, 0, 10, ElementFlagFilter.DO_NOT_FILTER, + offset, 10, false, false).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + + // if offset > max index of b+tree + offset = 10; + rmap = mc.asyncBopGet(key, 0, 10, ElementFlagFilter.DO_NOT_FILTER, + offset, 10, false, false).get(1000, TimeUnit.MILLISECONDS); + assertNotNull(rmap); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopGetSortTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopGetSortTest.java new file mode 100644 index 000000000..785cb9122 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopGetSortTest.java @@ -0,0 +1,114 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.transcoders.LongTranscoder; + +public class BopGetSortTest extends BaseIntegrationTest { + + private String key = "BopGetSortTest"; + + private Long[] items10 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L }; + + protected void tearDown() { + try { + deleteBTree(key, items10); + super.tearDown(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testBopGet_Asc() throws Exception { + // Create a list and add 10 items in it + addToBTree(key, items10); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Get item with offset and index + Map> rmap = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + assertEquals((Long) 10L, rmap.get(9L).getValue()); + + // Check ordering + assertTrue(rmap instanceof TreeMap); + assertEquals(0L, ((TreeMap>) 
rmap).firstKey() + .longValue()); + assertEquals(9L, ((TreeMap>) rmap).lastKey() + .longValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + + // Get item with offset and index with default transcoder + Map> rmap2 = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rmap2.size()); + } + + public void testBopGet_Desc() throws Exception { + // Create a list and add 10 items in it + addToBTree(key, items10); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Get item with offset and index + Map> rmap = mc.asyncBopGet(key, 10, 0, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + assertEquals((Long) 10L, rmap.get(9L).getValue()); + + // Check ordering + assertTrue(rmap instanceof TreeMap); + assertEquals(9L, ((TreeMap>) rmap).firstKey() + .longValue()); + assertEquals(0L, ((TreeMap>) rmap).lastKey() + .longValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + + // Get item with offset and index with default transcoder + Map> rmap2 = mc.asyncBopGet(key, 0, 10, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rmap2.size()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopInsertAndGetWithElementFlagTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopInsertAndGetWithElementFlagTest.java new file mode 100644 index 000000000..355ba14af --- /dev/null +++ 
b/src/test/manual/net/spy/memcached/collection/btree/BopInsertAndGetWithElementFlagTest.java @@ -0,0 +1,179 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementMultiFlagsFilter; +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; + +public class BopInsertAndGetWithElementFlagTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + private final String VALUE = "VALUE"; + private final byte[] FLAG = "FLAG".getBytes(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + }; + + public void testBopInsertAndGetWithEFlag() throws Exception { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE, + new CollectionAttributes()).get()); + + Map> map = 
mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get( + Long.MAX_VALUE, TimeUnit.MILLISECONDS); + + Assert.assertEquals(1, map.size()); + Assert.assertEquals(VALUE, map.get(BKEY).getValue()); + } + + public void testBopInsertAndGetWithoutEFlag() throws Exception { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get( + Long.MAX_VALUE, TimeUnit.MILLISECONDS); + + Assert.assertEquals(1, map.size()); + Assert.assertEquals(VALUE, map.get(BKEY).getValue()); + } + + public void testBopInsertAndRangedGetWithEFlag() throws Exception { + + // insert 3 bkeys + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 1, FLAG, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 2, FLAG, VALUE, + new CollectionAttributes()).get()); + + Map> map = mc.asyncBopGet(KEY, BKEY, BKEY + 2, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get( + Long.MAX_VALUE, TimeUnit.MILLISECONDS); + + Assert.assertEquals(3, map.size()); + Assert.assertEquals(VALUE, map.get(BKEY).getValue()); + Assert.assertEquals(VALUE, map.get(BKEY + 1).getValue()); + Assert.assertEquals(VALUE, map.get(BKEY + 2).getValue()); + } + + public void testBopInsertAndRangedGetWithoutEFlag() throws Exception { + + // insert 3 bkeys + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 1, null, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 2, null, VALUE, + new CollectionAttributes()).get()); + + Map> map = mc.asyncBopGet(KEY, BKEY, BKEY + 2, + ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false).get( + Long.MAX_VALUE, TimeUnit.MILLISECONDS); + + Assert.assertEquals(3, 
map.size()); + Assert.assertEquals(VALUE, map.get(BKEY).getValue()); + Assert.assertEquals(VALUE, map.get(BKEY + 1).getValue()); + Assert.assertEquals(VALUE, map.get(BKEY + 2).getValue()); + } + + public void testGetAllOfNotFlaggedBkeys() throws Exception { + // insert 3 bkeys + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 1, null, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 2, FLAG, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 3, FLAG, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 4, null, VALUE, + new CollectionAttributes()).get()); + + // get not flagged bkeys + ElementFlagFilter filter = new ElementFlagFilter(CompOperands.NotEqual, + new byte[] { 0 }); + filter.setBitOperand(BitWiseOperands.AND, new byte[] { 0 }); + + Map> map = mc.asyncBopGet(KEY, BKEY, BKEY + 100, + filter, 0, 100, false, false).get(); + + Assert.assertEquals(3, map.size()); + } + + public void testBopInsertAndRangedGetWithEFlags() throws Exception { + + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, new byte[] { 0 }, VALUE, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 1, new byte[] { 1 }, + VALUE, new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 2, new byte[] { 2 }, + VALUE, new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 3, new byte[] { 3 }, + VALUE, new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY + 4, new byte[] { 4 }, + VALUE, new CollectionAttributes()).get()); + + ElementMultiFlagsFilter filter = new ElementMultiFlagsFilter(); + filter.setCompOperand(CompOperands.Equal); + filter.addCompValue(new byte[] { 0 }); + filter.addCompValue(new byte[] { 1 }); + filter.addCompValue(new byte[] { 2 
}); + filter.addCompValue(new byte[] { 3 }); + + // filter.setBitOperand(BitWiseOperands.AND, new byte[] { 0 }); + + Map> map = mc.asyncBopGet(KEY, BKEY, BKEY + 100, + filter, 0, 100, false, false).get(); + + Assert.assertEquals(4, map.size()); + + ElementMultiFlagsFilter filter2 = new ElementMultiFlagsFilter(); + filter2.setCompOperand(CompOperands.NotEqual); + filter2.addCompValue(new byte[] { 0 }); + filter2.addCompValue(new byte[] { 1 }); + filter2.addCompValue(new byte[] { 2 }); + filter2.addCompValue(new byte[] { 3 }); + + // filter.setBitOperand(BitWiseOperands.AND, new byte[] { 0 }); + + Map> map2 = mc.asyncBopGet(KEY, BKEY, BKEY + 100, + filter2, 0, 100, false, false).get(); + + Assert.assertEquals(1, map2.size()); + } +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyExists.java b/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyExists.java new file mode 100644 index 000000000..4fd429a8f --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyExists.java @@ -0,0 +1,142 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.transcoders.LongTranscoder; + +public class BopInsertWhenKeyExists extends BaseIntegrationTest { + + private String key = "BopInsertWhenKeyExists"; + + private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L }; + + protected void tearDown() { + try { + mc.asyncBopDelete(key, 0, 4000, ElementFlagFilter.DO_NOT_FILTER, 0, + true).get(1000, TimeUnit.MILLISECONDS); + mc.delete(key).get(); + super.tearDown(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testBopInsert_unreadable_largestTrim() throws Exception { + // insert with unreadable + CollectionAttributes attr = new CollectionAttributes(); + attr.setReadable(false); + attr.setOverflowAction(CollectionOverflowAction.largest_trim); + + // insert + Boolean result = mc.asyncBopInsert(key, 0L, null, "value", attr).get(); + Assert.assertTrue(result); + + // get attr + CollectionAttributes attr2 = mc.asyncGetAttr(key).get(); + Assert.assertFalse(attr2.getReadable()); + Assert.assertEquals(CollectionOverflowAction.largest_trim, + attr2.getOverflowAction()); + + // get element + CollectionFuture>> future = mc.asyncBopGet( + key, 0L, ElementFlagFilter.DO_NOT_FILTER, false, false); + Assert.assertNull(future.get()); + + Assert.assertEquals(CollectionResponse.UNREADABLE, future + .getOperationStatus().getResponse()); + } + + public void testBopInsert_Normal() throws Exception { + // Create a list and add it 9 items + addToBTree(key, 
items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert one item + assertTrue(mc.asyncBopInsert(key, 20, null, 10L, + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS)); + + // Check inserted item + Map> rmap = mc.asyncBopGet(key, 0, 100, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + assertEquals((Long) 10L, rmap.get(20L).getValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + } + + public void testBopInsert_SameItem() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item same to the last item + mc.asyncBopInsert(key, 10, null, 9L, new CollectionAttributes()).get( + 1000, TimeUnit.MILLISECONDS); + + // Check that item is inserted + Map> rmap = mc.asyncBopGet(key, 0, 100, + ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rmap.size()); + } + + public void testBopInsert_SameBkey() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item same to the last item + mc.asyncBopInsert(key, 8, null, 10L, new CollectionAttributes()).get( + 1000, TimeUnit.MILLISECONDS); + + // Check that item is inserted + Map> rmap = mc.asyncBopGet(key, 0, 100, + 
ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(9, rmap.size()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyNotExist.java b/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyNotExist.java new file mode 100644 index 000000000..8dc1d90be --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopInsertWhenKeyNotExist.java @@ -0,0 +1,115 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class BopInsertWhenKeyNotExist extends BaseIntegrationTest { + + private String key = "BopInsertWhenKeyNotExist"; + + private String[] items9 = { "value0", "value1", "value2", "value3", + "value4", "value5", "value6", "value7", "value8", }; + + protected void tearDown() { + try { + deleteBTree(key, items9); + super.tearDown(); + } catch (Exception e) { + } + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * true	false	null
+	 * 
+ */ + public void testBopInsert_nokey_01() throws Exception { + insertToFail(key, true, null); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * false	true	not null
+	 * 
+ */ + public void testBopInsert_nokey_02() throws Exception { + assertFalse(insertToSucceed(key, false, items9[0])); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * false	false	not null
+	 * 
+ */ + public void testBopInsert_nokey_04() throws Exception { + assertFalse(insertToSucceed(key, false, items9[0])); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * true	true	not null
+	 * 
+ */ + public void testBopInsert_nokey_05() throws Exception { + assertTrue(insertToSucceed(key, true, items9[0])); + } + + boolean insertToFail(String key, boolean createKeyIfNotExists, Object value) { + boolean result = false; + try { + result = mc + .asyncBopInsert( + key, + 0, + null, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + fail("should be failed"); + } catch (Exception e) { + } + return result; + } + + boolean insertToSucceed(String key, boolean createKeyIfNotExists, + Object value) { + boolean result = false; + try { + result = mc + .asyncBopInsert( + key, + 0, + null, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + e.printStackTrace(); + fail("should not be failed"); + } + return result; + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopMutateTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopMutateTest.java new file mode 100644 index 000000000..dfd865aa1 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopMutateTest.java @@ -0,0 +1,106 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.btree; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.internal.CollectionFuture; + +public class BopMutateTest extends BaseIntegrationTest { + + private String key = "BopMutate"; + + private String[] items9 = { "1", "2", "3", "4", "5", "6", "7", "8", "9", + "a" }; + + protected void setUp() { + try { + super.setUp(); + mc.delete(key); + } catch (Exception e) { + } + } + + public void testBopIncrDecr_Basic() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + // incr 2 + assertTrue(mc.asyncBopIncr(key, 1L, (int) 2) + .get(1000, TimeUnit.MILLISECONDS).equals(4L)); + // incr 10 + assertTrue(mc.asyncBopIncr(key, 1L, (int) 10) + .get(1000, TimeUnit.MILLISECONDS).equals(14L)); + + // decr 1 + assertTrue(mc.asyncBopDecr(key, 1L, (int) 1) + .get(1000, TimeUnit.MILLISECONDS).equals(13L)); + // decr 11 + assertTrue(mc.asyncBopDecr(key, 1L, (int) 11) + .get(1000, TimeUnit.MILLISECONDS).equals(2L)); + // decr 4 + assertTrue(mc.asyncBopDecr(key, 1L, (int) 4) + .get(1000, TimeUnit.MILLISECONDS).equals(0L)); + } + + public void testBopIncrDecr_Minus() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + // decr 10 + assertTrue(mc.asyncBopDecr(key, 1L, (int) 10) + .get(1000, TimeUnit.MILLISECONDS).equals(0L)); + } + + public void testBopIncrDecr_NoKeyError() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + // not exists the key + CollectionFuture future = mc.asyncBopIncr("aaaaa", 0L, (int) 2); + Long result = future.get(1000, TimeUnit.MILLISECONDS); + CollectionResponse response = future.getOperationStatus().getResponse(); + assertTrue(response.toString() == "NOT_FOUND"); + + // not exists the bkey + CollectionFuture future2 = mc.asyncBopIncr(key, 10L, (int) 2); + Long result2 = future2.get(1000, 
TimeUnit.MILLISECONDS); + CollectionResponse response2 = future2.getOperationStatus() + .getResponse(); + assertTrue(response2.toString() == "NOT_FOUND_ELEMENT"); + } + + public void testBopIncrDecr_StringError() throws Exception { + // Create a list and add it 9 items + addToBTree(key, items9); + + try { + // incr string value + CollectionFuture future3 = mc.asyncBopIncr(key, 9L, (int) 2); + Long result3 = future3.get(1000, TimeUnit.MILLISECONDS); + CollectionResponse response3 = future3.getOperationStatus() + .getResponse(); + System.out.println(response3.toString()); + } catch (Exception e) { + assertEquals( + "OperationException: CLIENT: CLIENT_ERROR cannot increment or decrement non-numeric value", + e.getMessage()); + } + } +} \ No newline at end of file diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopOverflowActionTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopOverflowActionTest.java new file mode 100644 index 000000000..2db71fc9b --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopOverflowActionTest.java @@ -0,0 +1,245 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; + +public class BopOverflowActionTest extends BaseIntegrationTest { + + private String key = "BopGetBoundaryTest"; + + protected void setUp() { + try { + super.setUp(); + mc.delete(key); + // mc.asyncBopDelete(key, 0, 20000, ElementFlagFilter.DO_NOT_FILTER, + // 0, true).get(1000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testBopGet_Maxcount() throws Exception { + // Test + for (int maxcount = 100; maxcount <= 300; maxcount += 100) { + // Create a B+ Tree + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + // Set maxcount + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, + TimeUnit.MILLISECONDS)); + + for (int i = 1; i <= maxcount; i++) { + mc.asyncBopInsert(key, i, null, "item" + i, + new CollectionAttributes()).get(); + } + + Map> result = mc.asyncBopGet(key, 0, + maxcount + 1000, ElementFlagFilter.DO_NOT_FILTER, 0, + maxcount + 1000, false, false).get(10000, + TimeUnit.MILLISECONDS); + assertEquals(maxcount, result.size()); + assertTrue(mc.asyncBopDelete(key, 0, 20000, + ElementFlagFilter.DO_NOT_FILTER, 0, false).get(1000, + TimeUnit.MILLISECONDS)); + } + } + + public void testBopGet_Overflow() throws Exception { + // Create a B+ Tree + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 10000 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + 
attrs.setOverflowAction(CollectionOverflowAction.error); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = 1; i <= maxcount + 10; i++) { + mc.asyncBopInsert(key, i, null, "item" + i, null).get(1000, + TimeUnit.MILLISECONDS); + } + + Map> result = mc.asyncBopGet(key, 0, + maxcount + 10, ElementFlagFilter.DO_NOT_FILTER, 0, + maxcount + 1000, false, false) + .get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(maxcount, result.size()); + assertTrue(result instanceof TreeMap); + assertEquals(0L, ((TreeMap>) result).firstKey() + .longValue()); + assertEquals(99L, ((TreeMap>) result).lastKey() + .longValue()); + assertTrue(mc.asyncBopDelete(key, 0, 20000, + ElementFlagFilter.DO_NOT_FILTER, 0, false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testBopGet_LargestTrim() throws Exception { + // Create a B+ Tree + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 10000 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.largest_trim); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = maxcount + 10; i >= 1; i--) { + assertTrue(mc.asyncBopInsert(key, i, null, "item" + i, null).get( + 1000, TimeUnit.MILLISECONDS)); + } + + Map> result = mc.asyncBopGet(key, 0, + maxcount + 10, ElementFlagFilter.DO_NOT_FILTER, 0, + maxcount + 1000, false, false) + .get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(100, result.size()); + assertTrue(result instanceof TreeMap); + assertEquals(0L, ((TreeMap>) result).firstKey() + .longValue()); + assertEquals(99L, ((TreeMap>) result).lastKey() + .longValue()); + assertTrue(mc.asyncBopDelete(key, 0, 20000, + ElementFlagFilter.DO_NOT_FILTER, 0, 
false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testBopGet_SmallestTrim() throws Exception { + // Create a B+ Tree + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 10000 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = 1; i <= maxcount + 10; i++) { + assertTrue(mc.asyncBopInsert(key, i, null, "item" + i, null).get( + 1000, TimeUnit.MILLISECONDS)); + } + + Map> result = mc.asyncBopGet(key, 0, + maxcount + 10, ElementFlagFilter.DO_NOT_FILTER, 0, + maxcount + 1000, false, false) + .get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(100, result.size()); + assertTrue(result instanceof TreeMap); + assertEquals(11L, ((TreeMap>) result).firstKey() + .longValue()); + assertEquals(110L, ((TreeMap>) result).lastKey() + .longValue()); + assertTrue(mc.asyncBopDelete(key, 0, 20000, + ElementFlagFilter.DO_NOT_FILTER, 0, false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testBopGet_SmallestTrim_OutOfRange() throws Exception { + // Create a set + mc.asyncBopInsert(key, 1, null, "item1", new CollectionAttributes()); + + // smallest_trim + assertTrue(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.smallest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // test + assertFalse(mc.asyncBopInsert(key, 0, null, "item0", + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS)); + + mc.asyncBopDelete(key, 0, 10, ElementFlagFilter.DO_NOT_FILTER, 0, false) + .get(1000, TimeUnit.MILLISECONDS); + } + + public void testBopGet_LargestTrim_OutOfRange() throws Exception { + // Create a set + mc.asyncBopInsert(key, 1, null, "item1", new CollectionAttributes()); + + // largest_trim + assertTrue(mc.asyncSetAttr(key, 
null, 1L, + CollectionOverflowAction.largest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // test + assertFalse(mc.asyncBopInsert(key, 2, null, "item2", null).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncBopDelete(key, 0, 10, ElementFlagFilter.DO_NOT_FILTER, 0, false) + .get(1000, TimeUnit.MILLISECONDS); + } + + public void testBopGet_AvailableOverflowAction() throws Exception { + // Create a set + mc.asyncBopInsert(key, 0, null, "item0", new CollectionAttributes()); + + // Set OverflowAction + // error + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // head_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // tail_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.tail_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // smallest_trim + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.smallest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // largest_trim + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.largest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncBopDelete(key, 0, ElementFlagFilter.DO_NOT_FILTER, false).get( + 1000, TimeUnit.MILLISECONDS); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopServerMessageTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopServerMessageTest.java new file mode 100644 index 000000000..d80cba1ca --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopServerMessageTest.java @@ -0,0 +1,224 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.OperationStatus; + +public class BopServerMessageTest extends BaseIntegrationTest { + + private String key = "BopServerMessageTest"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.asyncBopDelete(key, 0, 100, ElementFlagFilter.DO_NOT_FILTER, 0, true) + .get(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testNotFound() throws Exception { + CollectionFuture>> future = (CollectionFuture>>) mc + .asyncBopGet(key, 0, ElementFlagFilter.DO_NOT_FILTER, false, + false); + assertNull(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("NOT_FOUND", status.getMessage()); + } + + public void testNotFoundElement() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + CollectionFuture>> future2 = (CollectionFuture>>) mc + .asyncBopGet(key, 1, ElementFlagFilter.DO_NOT_FILTER, 
false, + false); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("NOT_FOUND_ELEMENT", status.getMessage()); + } + + public void testCreatedStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("CREATED_STORED", status.getMessage()); + } + + public void testStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncBopInsert(key, 1, null, 1, + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("STORED", status.getMessage()); + } + + public void testOutOfRange() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 1, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.largest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncBopInsert(key, 2, null, + "bbbb", new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OUT_OF_RANGE", status.getMessage()); + } + + public void testOverflowed() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + 
assertTrue(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncBopInsert(key, 1, null, + "aaa", new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OVERFLOWED", status.getMessage()); + } + + public void testElementExists() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert an item with same bkey + future = (CollectionFuture) mc.asyncBopInsert(key, 0, null, + "bbbb", new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("ELEMENT_EXISTS", status.getMessage()); + } + + public void testDeletedDropped() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + future = (CollectionFuture) mc.asyncBopDelete(key, 0, + ElementFlagFilter.DO_NOT_FILTER, true); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeleted() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncBopInsert(key, 1, null, + "bbbb", null); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + future = (CollectionFuture) mc.asyncBopDelete(key, 0, + 
ElementFlagFilter.DO_NOT_FILTER, false); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + + public void testDeletedDroppedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture>> future2 = (CollectionFuture>>) mc + .asyncBopGet(key, 0, ElementFlagFilter.DO_NOT_FILTER, true, + true); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeletedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncBopInsert(key, 0, null, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncBopInsert(key, 1, null, + "bbbb", null); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture>> future2 = (CollectionFuture>>) mc + .asyncBopGet(key, 0, ElementFlagFilter.DO_NOT_FILTER, true, + true); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopStoreAndGetTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopStoreAndGetTest.java new file mode 100644 index 000000000..211ddd59a --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopStoreAndGetTest.java @@ -0,0 +1,485 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.Arrays; +import java.util.Map; + +import net.spy.memcached.collection.BTreeOrder; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.Element; +import net.spy.memcached.internal.BTreeStoreAndGetFuture; + +public class BopStoreAndGetTest extends BaseIntegrationTest { + + private String key = "BopStoreAndGetTest"; + private String invalidKey = "InvalidBopStoreAndGetTest"; + private String kvKey = "KvBopStoreAndGetTest"; + + private long[] longBkeys = { 10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, + 19L }; + private byte[][] byteArrayBkeys = { new byte[] { 10 }, new byte[] { 11 }, + new byte[] { 12 }, new byte[] { 13 }, new byte[] { 14 }, + new byte[] { 15 }, new byte[] { 16 }, new byte[] { 17 }, + new byte[] { 18 }, new byte[] { 19 } }; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(key).get(); + mc.delete(invalidKey).get(); + mc.delete(kvKey).get(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testInsertAndGetTrimmedLongBKey() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + 
attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, 1000, null, "val", null).get()); + + // expecting that bkey 10 was trimmed out and the first bkey is 11 + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(11L, posMap.get(0).getLongBkey()); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(11) + BTreeStoreAndGetFuture f = mc + .asyncBopInsertAndGetTrimmed(key, 2000, null, "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertEquals(11L, element.getLongBkey()); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the first bkey which is expected to be 12 + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(12L, posMap.get(0).getLongBkey()); + } + + public void testInsertAndGetTrimmedByteArrayBKey() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, new byte[] { 64 }, null, "val", null) + .get()); + + // expecting that bkey byte(10) was trimmed out and the first bkey is + // byte(11) + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new 
byte[] { 11 }, posMap.get(0) + .getByteArrayBkey())); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(11) + BTreeStoreAndGetFuture f = mc + .asyncBopInsertAndGetTrimmed(key, new byte[] { 65 }, null, + "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertTrue(Arrays.equals(new byte[] { 11 }, element.getByteArrayBkey())); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the first bkey which is expected to be byte(12) + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 12 }, posMap.get(0) + .getByteArrayBkey())); + } + + public void testInsertAndGetTrimmedLongBKeyLargest() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.largest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, 9, null, "val", null).get()); + + // expecting that bkey 19 was trimmed out and the last bkey is 18 + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(18L, posMap.get(0).getLongBkey()); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(18) + BTreeStoreAndGetFuture f = mc + .asyncBopInsertAndGetTrimmed(key, 8, null, "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertEquals(18L, element.getLongBkey()); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was 
trimmed out"); + + // finally check the last bkey which is expected to be 17 + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(17L, posMap.get(0).getLongBkey()); + } + + public void testInsertAndGetTrimmedByteArrayBKeyLargest() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.largest_trim); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, new byte[] { 9 }, null, "val", null) + .get()); + + // expecting that bkey byte(19) was trimmed out and the last bkey is + // byte(18) + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 18 }, posMap.get(0) + .getByteArrayBkey())); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(18) + BTreeStoreAndGetFuture f = mc + .asyncBopInsertAndGetTrimmed(key, new byte[] { 8 }, null, + "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertTrue(Arrays.equals(new byte[] { 18 }, element.getByteArrayBkey())); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the last bkey which is expected to be byte(17) + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 17 }, posMap.get(0) + .getByteArrayBkey())); + } + + public void testUpsertAndGetTrimmedLongBKey() throws Exception { + // insert test data + CollectionAttributes attrs = new 
CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, 1000, null, "val", null).get()); + + // expecting that bkey 10 was trimmed out and the first bkey is 11 + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(11L, posMap.get(0).getLongBkey()); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(11) + BTreeStoreAndGetFuture f = mc + .asyncBopUpsertAndGetTrimmed(key, 2000, null, "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertEquals(11L, element.getLongBkey()); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the first bkey which is expected to be 12 + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(12L, posMap.get(0).getLongBkey()); + } + + public void testUpsertAndGetTrimmedByteArrayBKey() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, new byte[] { 64 }, null, "val", null) + .get()); + + // expecting that bkey byte(10) was trimmed out and the first bkey is + // byte(11) + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + 
assertTrue(Arrays.equals(new byte[] { 11 }, posMap.get(0) + .getByteArrayBkey())); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(11) + BTreeStoreAndGetFuture f = mc + .asyncBopUpsertAndGetTrimmed(key, new byte[] { 65 }, null, + "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertTrue(Arrays.equals(new byte[] { 11 }, element.getByteArrayBkey())); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the first bkey which is expected to be byte(12) + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.ASC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 12 }, posMap.get(0) + .getByteArrayBkey())); + } + + public void testUpsertAndGetTrimmedLongBKeyLargest() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.largest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, 9, null, "val", null).get()); + + // expecting that bkey 19 was trimmed out and the last bkey is 18 + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(18L, posMap.get(0).getLongBkey()); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(18) + BTreeStoreAndGetFuture f = mc + .asyncBopUpsertAndGetTrimmed(key, 8, null, "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertEquals(18L, element.getLongBkey()); + System.out.println("The insertion was succeeded and an element 
" + + f.getElement() + " was trimmed out"); + + // finally check the last bkey which is expected to be 17 + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertEquals(17L, posMap.get(0).getLongBkey()); + } + + public void testUpsertAndGetTrimmedByteArrayBKeyLargest() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.largest_trim); + for (byte[] each : byteArrayBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // cause an overflow + assertTrue(mc.asyncBopInsert(key, new byte[] { 9 }, null, "val", null) + .get()); + + // expecting that bkey byte(19) was trimmed out and the last bkey is + // byte(18) + Map> posMap = mc.asyncBopGetByPosition(key, + BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 18 }, posMap.get(0) + .getByteArrayBkey())); + + // then cause an overflow again and get a trimmed object + // it would be a bkey(18) + BTreeStoreAndGetFuture f = mc + .asyncBopUpsertAndGetTrimmed(key, new byte[] { 8 }, null, + "val", null); + boolean succeeded = f.get(); + Element element = f.getElement(); + assertTrue(succeeded); + assertNotNull(element); + assertTrue(Arrays.equals(new byte[] { 18 }, element.getByteArrayBkey())); + System.out.println("The insertion was succeeded and an element " + + f.getElement() + " was trimmed out"); + + // finally check the last bkey which is expected to be byte(17) + posMap = mc.asyncBopGetByPosition(key, BTreeOrder.DESC, 0).get(); + assertNotNull(posMap); + assertNotNull(posMap.get(0)); // the first element + assertTrue(Arrays.equals(new byte[] { 17 }, posMap.get(0) + .getByteArrayBkey())); + } + + public void testInsertAndGetTrimmedOtherResponses() throws Exception { + // insert test data + 
CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // set a test key + mc.set(kvKey, 0, "value").get(); + + BTreeStoreAndGetFuture f = null; + Boolean result = null; + + // NOT_FOUND + f = mc.asyncBopInsertAndGetTrimmed(invalidKey, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.NOT_FOUND, f.getOperationStatus() + .getResponse()); + + // OUT_OF_RANGE + f = mc.asyncBopInsertAndGetTrimmed(key, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.OUT_OF_RANGE, f.getOperationStatus() + .getResponse()); + + // TYPE_MISMATCH + f = mc.asyncBopInsertAndGetTrimmed(kvKey, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.TYPE_MISMATCH, f.getOperationStatus() + .getResponse()); + + // BKEY_MISMATCH + f = mc.asyncBopInsertAndGetTrimmed(key, byteArrayBkeys[0], null, "val", + null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.BKEY_MISMATCH, f.getOperationStatus() + .getResponse()); + + // ELEMENT_EXISTS + f = mc.asyncBopInsertAndGetTrimmed(key, longBkeys[0], null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.ELEMENT_EXISTS, f.getOperationStatus() + .getResponse()); + } + + public void testUpsertAndGetTrimmedOtherResponses() throws Exception { + // insert test data + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + attrs.setOverflowAction(CollectionOverflowAction.smallest_trim); + for (long each : longBkeys) { + mc.asyncBopInsert(key, each, null, "val", attrs).get(); + } + + // set a test key + mc.set(kvKey, 0, "value").get(); + + BTreeStoreAndGetFuture f = null; + Boolean result = null; + + // NOT_FOUND + f = 
mc.asyncBopUpsertAndGetTrimmed(invalidKey, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.NOT_FOUND, f.getOperationStatus() + .getResponse()); + + // OUT_OF_RANGE + f = mc.asyncBopUpsertAndGetTrimmed(key, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.OUT_OF_RANGE, f.getOperationStatus() + .getResponse()); + + // TYPE_MISMATCH + f = mc.asyncBopUpsertAndGetTrimmed(kvKey, 1, null, "val", null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.TYPE_MISMATCH, f.getOperationStatus() + .getResponse()); + + // BKEY_MISMATCH + f = mc.asyncBopUpsertAndGetTrimmed(key, byteArrayBkeys[0], null, "val", + null); + result = f.get(); + assertFalse(result); + assertEquals(CollectionResponse.BKEY_MISMATCH, f.getOperationStatus() + .getResponse()); + + // REPLACED + f = mc.asyncBopUpsertAndGetTrimmed(key, longBkeys[0], null, "val", null); + result = f.get(); + assertTrue(result); + assertEquals(CollectionResponse.REPLACED, f.getOperationStatus() + .getResponse()); + assertNull(f.getElement()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopUpdateTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopUpdateTest.java new file mode 100644 index 000000000..e7ec7c06c --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopUpdateTest.java @@ -0,0 +1,307 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import java.util.concurrent.ExecutionException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagUpdate; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; + +public class BopUpdateTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + + private final long BKEY = 1000L; + + private final String VALUE = "VALUE"; + private final String NEW_VALUE = "NEWVALUE"; + + private final String EFLAG = "EFLAG"; + private final String NEW_EFLAG = "NEW_EFLAG"; + + private final byte[] BYTE_EFLAG = new byte[] { 1, 0, 0, 0 }; + private final byte[] NEW_BYTE_EFLAG = new byte[] { 1, 1, 0, 0 }; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + // + // without bitop + // + + public void testUpdateZeroLengthEflag() { + byte[] eflag = new byte[] { 0, 0, 0, 0 }; + + try { + // insert one + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + // update eflag only + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(eflag), null).get()); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + } + + public void testUpdatePaddingRequired() { + byte[] eflag = new byte[] { 1, 0, 0, 0 }; + + try { + // insert one + Assert.assertTrue(mc.asyncBopInsert(KEY, 
BKEY, null, VALUE, + new CollectionAttributes()).get()); + + // update eflag only + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(eflag), null).get()); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + } + + public void testUpdateExceededLengthEFlag() { + byte[] eflag = "1234567890123456789012345678901234567890".getBytes(); + + try { + // insert one + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + // update eflag only + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(eflag), null).get()); + } catch (IllegalArgumentException e) { + + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testUpdateNotExistsKey() { + try { + // update value only + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, null, VALUE).get()); + + // update eflag only + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), null).get()); + + // update both value and eflag + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), VALUE).get()); + + // delete eflag + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + ElementFlagUpdate.RESET_FLAG, null).get()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testExistsKey() { + try { + // + // insert one + // + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + // + // update value only + // + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, null, NEW_VALUE) + .get()); + + Assert.assertEquals( + NEW_VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + // + // update eflag only + 
// + Assert.assertTrue(mc + .asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, NEW_EFLAG + .getBytes()), false, false).get().isEmpty()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(NEW_EFLAG.getBytes()), null).get()); + + Assert.assertEquals( + NEW_VALUE, + mc.asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, NEW_EFLAG + .getBytes()), false, false).get().get(BKEY) + .getValue()); + + // + // update both value and eflag + // + Assert.assertEquals( + NEW_VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), VALUE).get()); + + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + // + // delete eflag + // + Assert.assertEquals( + VALUE, + mc.asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, EFLAG + .getBytes()), false, false).get().get(BKEY) + .getValue()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + ElementFlagUpdate.RESET_FLAG, VALUE).get()); + + Assert.assertTrue(mc + .asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, EFLAG + .getBytes()), false, false).get().isEmpty()); + + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + // + // with bitop + // + public void testExistsKeyWithBitOp() { + try { + // + // insert one + // + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, BYTE_EFLAG, VALUE, + new CollectionAttributes()).get()); + // 0x01 00 00 00 + + // + // update eflag only + // + Assert.assertTrue(mc + .asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, + NEW_BYTE_EFLAG), false, false).get() + .isEmpty()); + + Assert.assertTrue(mc.asyncBopUpdate( + KEY, + BKEY, + new ElementFlagUpdate(1, BitWiseOperands.OR, + new byte[] { 1 }), null).get()); + // 0x01 01 00 00 + + 
Assert.assertEquals( + VALUE, + mc.asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, + NEW_BYTE_EFLAG), false, false).get() + .get(BKEY).getValue()); + + // + // update both value and eflag + // + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(NEW_EFLAG.getBytes()), NEW_VALUE) + .get()); + + Assert.assertEquals( + NEW_VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + // + // delete eflag + // + Assert.assertEquals( + NEW_VALUE, + mc.asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, NEW_EFLAG + .getBytes()), false, false).get().get(BKEY) + .getValue()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + ElementFlagUpdate.RESET_FLAG, VALUE).get()); + + Assert.assertTrue(mc + .asyncBopGet( + KEY, + BKEY, + new ElementFlagFilter(CompOperands.Equal, EFLAG + .getBytes()), false, false).get().isEmpty()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/BopUpsertTest.java b/src/test/manual/net/spy/memcached/collection/btree/BopUpsertTest.java new file mode 100644 index 000000000..e071a44c6 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/BopUpsertTest.java @@ -0,0 +1,51 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class BopUpsertTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testUpsertNotExistsKey() { + try { + boolean result = mc.asyncBopUpsert(KEY, BKEY, "eflag".getBytes(), + "VALUE", new CollectionAttributes()).get(); + + Assert.assertTrue("Upsert failed", result); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopCountWithElementFlagFilterTest.java b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopCountWithElementFlagFilterTest.java new file mode 100644 index 000000000..bc89a81d8 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopCountWithElementFlagFilterTest.java @@ -0,0 +1,192 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.btree.longbkey; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; +import net.spy.memcached.internal.CollectionFuture; + +public class BopCountWithElementFlagFilterTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final byte[] BKEY = new byte[] { (byte) 1 }; + private final byte[] BKEY2 = new byte[] { (byte) 2 }; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testGetBKeyCountFromInvalidKey() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.GreaterThan, "1".getBytes()); + + CollectionFuture future = mc.asyncBopGetItemCount( + "INVALIDKEY", BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.NOT_FOUND, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromInvalidType() { + 
try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // insert value into set + Boolean insertResult = mc.asyncSopInsert(KEY, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // get count from key + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.TYPE_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY2, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty2() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + "eflag".getBytes(), "value", 
new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY2, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY2, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty3() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY2, + "eflageflag".getBytes(), "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetBulkTest.java b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetBulkTest.java new file mode 100644 index 000000000..37f4d412d --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopGetBulkTest.java @@ -0,0 +1,397 @@ +/* + * arcus-java-client 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree.longbkey;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.TimeUnit;

import junit.framework.Assert;
import net.spy.memcached.collection.BTreeElement;
import net.spy.memcached.collection.BTreeGetResult;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.CollectionResponse;
import net.spy.memcached.collection.ElementFlagFilter;
import net.spy.memcached.internal.CollectionGetBulkFuture;

/**
 * Tests asyncBopGetBulk with byte-array bkeys: normal retrieval, per-key
 * NOT_FOUND handling, argument validation, and per-key error responses
 * (UNREADABLE, NOT_FOUND_ELEMENT, TYPE_MISMATCH, BKEY_MISMATCH).
 *
 * NOTE(review): generic type arguments were stripped from the patch text and
 * restored here — confirm against ArcusClient.asyncBopGetBulk's signature.
 */
public class BopGetBulkTest extends BaseIntegrationTest {

	// FIX: replaced the double-brace anonymous-subclass initializers with
	// plain construction (no accidental subclass, no serialVersionUID noise).
	private final List<String> keyList = new ArrayList<String>(Arrays.asList(
			"BopGetBulkTest1", "BopGetBulkTest2", "BopGetBulkTest3",
			"BopGetBulkTest4", "BopGetBulkTest5"));

	// 499 keys: deliberately larger than the per-request key limit.
	private final List<String> keyList2 = new ArrayList<String>();
	{
		for (int i = 1; i < 500; i++) {
			keyList2.add("BopGetBulkTest" + i);
		}
	}

	private final byte[] eFlag = { 1, 8, 16, 32, 64 };

	private final String value = String.valueOf(new Random().nextLong());

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		try {
			// each key gets bkeys 0x00..0x02; only bkey 0x01 carries eFlag
			for (int i = 0; i < keyList.size(); i++) {
				mc.delete(keyList.get(i)).get();
				mc.asyncBopInsert(keyList.get(i), new byte[] { 0 }, null,
						value + "0", new CollectionAttributes()).get();
				mc.asyncBopInsert(keyList.get(i), new byte[] { 1 }, eFlag,
						value + "1", new CollectionAttributes()).get();
				mc.asyncBopInsert(keyList.get(i), new byte[] { 2 }, null,
						value + "2", new CollectionAttributes()).get();
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** All keys exist: every result carries the three inserted elements. */
	public void testGetBulkLongBkeyGetAll() {
		try {
			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f =
					mc.asyncBopGetBulk(keyList, ByteArrayBKey.MIN,
							ByteArrayBKey.MAX, filter, 0, 10);

			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results =
					f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<ByteArrayBKey, Object> r =
						results.get(keyList.get(i));

				// check response
				Assert.assertNotNull(r.getCollectionResponse().getResponse());

				// check elements
				Map<ByteArrayBKey, BTreeElement<ByteArrayBKey, Object>> elements =
						r.getElements();

				Assert.assertEquals(3, elements.size());

				Assert.assertTrue(Arrays.equals(eFlag,
						elements.get(new byte[] { 1 }).getEflag()));

				for (long j = 0; j < elements.size(); j++) {
					Assert.assertTrue(Arrays.equals(new byte[] { (byte) j },
							elements.get(new byte[] { (byte) j }).getBkey()
									.getBytes()));
					Assert.assertEquals(value + j,
							(String) elements.get(new byte[] { (byte) j })
									.getValue());
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** Every key deleted: each per-key result is NOT_FOUND with no elements. */
	public void testGetBulkNotFoundAll() {
		try {
			for (int i = 0; i < keyList.size(); i++) {
				mc.delete(keyList.get(i)).get();
			}

			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f =
					mc.asyncBopGetBulk(keyList, ByteArrayBKey.MIN,
							ByteArrayBKey.MAX, filter, 0, 10);

			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results =
					f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<ByteArrayBKey, Object> r =
						results.get(keyList.get(i));

				Assert.assertEquals(CollectionResponse.NOT_FOUND, r
						.getCollectionResponse().getResponse());
				Assert.assertNull(r.getElements());
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** Mixed existing/deleted keys: each result reflects its own key's state. */
	public void testGetBulkNotFoundMixed() {
		try {
			// delete the even-indexed keys
			for (int i = 0; i < keyList.size(); i++) {
				if (i % 2 == 0)
					mc.delete(keyList.get(i)).get();
			}

			ElementFlagFilter filter = ElementFlagFilter.DO_NOT_FILTER;

			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f =
					mc.asyncBopGetBulk(keyList, ByteArrayBKey.MIN,
							ByteArrayBKey.MAX, filter, 0, 10);

			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results =
					f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());

			// check result
			for (int i = 0; i < keyList.size(); i++) {
				BTreeGetResult<ByteArrayBKey, Object> r =
						results.get(keyList.get(i));

				if (i % 2 == 0) {
					Assert.assertEquals(CollectionResponse.NOT_FOUND, r
							.getCollectionResponse().getResponse());
				} else {
					Assert.assertEquals(CollectionResponse.OK, r
							.getCollectionResponse().getResponse());

					Map<ByteArrayBKey, BTreeElement<ByteArrayBKey, Object>> elements =
							r.getElements();

					Assert.assertEquals(3, elements.size());

					Assert.assertTrue(Arrays.equals(eFlag,
							elements.get(new byte[] { 1 }).getEflag()));

					for (long j = 0; j < elements.size(); j++) {
						Assert.assertTrue(Arrays.equals(
								new byte[] { (byte) j },
								elements.get(new byte[] { (byte) j }).getBkey()
										.getBytes()));
						Assert.assertEquals(value + j,
								(String) elements.get(new byte[] { (byte) j })
										.getValue());
					}
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** Empty key list, oversized key list, and oversized count arguments. */
	public void testErrorArguments() {
		try {
			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f = null;

			// empty key list
			f = mc.asyncBopGetBulk(new ArrayList<String>(), new byte[] { 0 },
					new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);
			Assert.assertEquals(0, results.size());

			// too many keys: client-side validation should reject this
			try {
				f = mc.asyncBopGetBulk(keyList2, new byte[] { 0 },
						new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0,
						10);
				results = f.get(1000L, TimeUnit.MILLISECONDS);
			} catch (IllegalArgumentException e) {
				// expected
			}

			// too large a count: client-side validation should reject this
			try {
				f = mc.asyncBopGetBulk(keyList, new byte[] { 0 },
						new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0,
						1000);
				results = f.get(1000L, TimeUnit.MILLISECONDS);
			} catch (IllegalArgumentException e) {
				// expected
			}
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** An unreadable item reports UNREADABLE for its key only. */
	public void testUnreadable() {
		try {
			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f = null;

			mc.delete(keyList.get(0)).get();
			CollectionAttributes attrs = new CollectionAttributes();
			attrs.setReadable(false);
			mc.asyncBopInsert(keyList.get(0), new byte[] { 0 }, null,
					value + "0", attrs).get();

			f = mc.asyncBopGetBulk(keyList, new byte[] { 0 },
					new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("UNREADABLE", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	/** A bkey range containing no elements reports NOT_FOUND_ELEMENT. */
	public void testNotFoundElement() {
		try {
			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f = null;

			mc.delete(keyList.get(0)).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 0 }, null,
					value + "0", new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 1 }, eFlag,
					value + "1", new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), new byte[] { 2 }, null,
					value + "2", new CollectionAttributes()).get();

			// range 0x20..0x40 is outside every inserted bkey (0x00..0x02)
			f = mc.asyncBopGetBulk(keyList, new byte[] { 32 },
					new byte[] { 64 }, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			for (int i = 0; i < results.size(); i++) {
				Assert.assertEquals("NOT_FOUND_ELEMENT",
						results.get(keyList.get(i)).getCollectionResponse()
								.getMessage());
			}
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	/** A plain key-value item in the key list reports TYPE_MISMATCH. */
	public void testTypeMismatch() {
		try {
			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f = null;

			mc.delete(keyList.get(0)).get();
			mc.set(keyList.get(0), 10, "V").get(200L, TimeUnit.MILLISECONDS);

			f = mc.asyncBopGetBulk(keyList, new byte[] { 0 },
					new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("TYPE_MISMATCH", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	/** A b+tree built with long bkeys, queried with byte[] bkeys: BKEY_MISMATCH. */
	public void testBKeyMismatch() {
		try {
			Map<String, BTreeGetResult<ByteArrayBKey, Object>> results = null;
			CollectionGetBulkFuture<Map<String, BTreeGetResult<ByteArrayBKey, Object>>> f = null;

			mc.delete(keyList.get(0)).get();
			mc.asyncBopInsert(keyList.get(0), 0, null, value + "0",
					new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), 1, eFlag, value + "0",
					new CollectionAttributes()).get();
			mc.asyncBopInsert(keyList.get(0), 2, null, value + "0",
					new CollectionAttributes()).get();

			f = mc.asyncBopGetBulk(keyList, new byte[] { 0 },
					new byte[] { 10 }, ElementFlagFilter.DO_NOT_FILTER, 0, 10);
			results = f.get(1000L, TimeUnit.MILLISECONDS);

			Assert.assertEquals(keyList.size(), results.size());
			Assert.assertEquals("BKEY_MISMATCH", results.get(keyList.get(0))
					.getCollectionResponse().getMessage());
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}
}
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree.longbkey;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import junit.framework.Assert;
import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;

/**
 * Verifies that a ranged bop get returns all elements regardless of which
 * elements do or do not carry an eflag ("irregular" flag combinations).
 *
 * FIX: the four original test methods were copy-paste identical except for
 * the per-element flags; the shared body is extracted into one helper.
 */
public class BopGetIrregularEflagTest extends BaseIntegrationTest {

	private final String key = "BopGetIrregularEflagTest";

	private final byte[] eFlag = { 1 };

	private final Object value = "valvalvalvalvalvalvalvalvalval";

	/**
	 * Inserts elements with bkeys 0x00..0x02 using the given per-element
	 * flags (null = no flag), fetches the full range back, and asserts every
	 * bkey/value round-trips.
	 */
	private void insertAndAssertGetAll(byte[] flag0, byte[] flag1, byte[] flag2)
			throws Exception {
		mc.delete(key).get();
		mc.asyncBopInsert(key, new byte[] { 0 }, flag0, value + "0",
				new CollectionAttributes()).get();
		mc.asyncBopInsert(key, new byte[] { 1 }, flag1, value + "1",
				new CollectionAttributes()).get();
		mc.asyncBopInsert(key, new byte[] { 2 }, flag2, value + "2",
				new CollectionAttributes()).get();

		// NOTE(review): generic args restored from the mangled patch text —
		// asyncBopGet with a byte[] range returns Map<ByteArrayBKey, Element<Object>>.
		Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(key,
				new byte[] { 0 }, new byte[] { 10 },
				ElementFlagFilter.DO_NOT_FILTER, 0, 0, false, false).get(
				100L, TimeUnit.MILLISECONDS);

		Assert.assertNotNull(map);
		Assert.assertEquals(3, map.size());

		for (long i = 0; i < map.size(); i++) {
			// the returned map supports raw byte[] lookups (byte-array bkey map)
			Element<Object> object = map.get(new byte[] { (byte) i });
			Assert.assertEquals(value + String.valueOf(i), object.getValue());
		}
	}

	/** All three elements flagged. */
	public void testGetAll_1() {
		try {
			insertAndAssertGetAll(eFlag, eFlag, eFlag);
		} catch (Exception e) {
			e.printStackTrace();
			Assert.fail(e.getMessage());
		}
	}

	/** First element unflagged. */
	public void testGetAll_2() {
		try {
			insertAndAssertGetAll(null, eFlag, eFlag);
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	/** Middle element unflagged. */
	public void testGetAll_3() {
		try {
			insertAndAssertGetAll(eFlag, null, eFlag);
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}

	/** No element flagged. */
	public void testGetAll_4() {
		try {
			insertAndAssertGetAll(null, null, null);
		} catch (Exception e) {
			Assert.fail(e.getMessage());
		}
	}
}
+ */ +package net.spy.memcached.collection.btree.longbkey; + +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; + +public class BopGetTest extends BaseIntegrationTest { + + private static final String KEY = BopGetTest.class.getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testBopGet() throws Exception { + + byte[] bkey = new byte[] { (byte) 1 }; + + Boolean boolean1 = mc.asyncBopInsert(KEY, bkey, null, "value", + new CollectionAttributes()).get(); + + Assert.assertTrue(boolean1); + + Map> map = mc.asyncBopGet(KEY, bkey, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + + Assert.assertEquals(1, map.size()); + + Element el = map.get(new ByteArrayBKey(bkey)); + + Assert.assertNotNull(el); + + Assert.assertEquals("value", el.getValue()); + Assert.assertEquals("0x01", el.getBkeyByHex()); + Assert.assertNull(el.getFlagByHex()); + } +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopInsertAndGetWithElementFlagTest.java b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopInsertAndGetWithElementFlagTest.java new file mode 100644 index 000000000..e93df5228 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopInsertAndGetWithElementFlagTest.java @@ -0,0 +1,123 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree.longbkey;

import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;

import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;

import org.junit.Assert;

/**
 * Insert/get/delete round-trips for byte-array bkeys carrying element flags,
 * for both a single element and multiple elements.
 *
 * NOTE(review): generic type args restored from the mangled patch text.
 */
public class BopInsertAndGetWithElementFlagTest extends BaseIntegrationTest {

	private final String KEY = this.getClass().getSimpleName();
	private final byte[] BKEY = new byte[] { (byte) 1 };
	private final byte[] BKEY2 = new byte[] { (byte) 2 };
	private final byte[] BKEY3 = new byte[] { (byte) 3 };
	private final String VALUE = "VALUE";
	private final byte[] FLAG = "FLAG".getBytes();
	private final byte[] FLAG2 = "GLAF".getBytes();
	private final byte[] FLAG3 = "FFFF".getBytes();

	@Override
	protected void setUp() throws Exception {
		super.setUp();
		mc.delete(KEY).get();
	}

	// FIX: dropped the stray ';' that followed the original method body.
	@Override
	protected void tearDown() throws Exception {
		mc.delete(KEY).get();
		super.tearDown();
	}

	public void testSingleLongBkeyWithEFlag() throws Exception {

		// insert one
		Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE,
				new CollectionAttributes()).get());

		// get
		Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(KEY, BKEY,
				BKEY, ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false)
				.get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

		Assert.assertEquals(1, map.size());

		for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
			Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
			Assert.assertEquals(VALUE, i.getValue().getValue());
			Assert.assertTrue(Arrays.equals(FLAG, i.getValue().getFlag()));
		}

		// delete
		Assert.assertTrue(mc.asyncBopDelete(KEY, BKEY, BKEY,
				ElementFlagFilter.DO_NOT_FILTER, 100, false).get());

		// get again: the element must be gone
		map = mc.asyncBopGet(KEY, BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER,
				0, 10, false, false).get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

		Assert.assertEquals(0, map.size());

	}

	public void testMultipleLongBkeyWithEFlag() throws Exception {

		// insert 3 elements
		Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE,
				new CollectionAttributes()).get());
		Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY2, FLAG2, VALUE,
				new CollectionAttributes()).get());
		Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY3, FLAG3, VALUE,
				new CollectionAttributes()).get());

		// get 3 elements (the returned map supports raw byte[] lookups)
		Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(KEY, BKEY,
				BKEY3, ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false)
				.get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

		Assert.assertEquals(3, map.size());

		Assert.assertEquals(VALUE, map.get(BKEY).getValue());
		Assert.assertEquals(VALUE, map.get(BKEY2).getValue());
		Assert.assertEquals(VALUE, map.get(BKEY3).getValue());

		Assert.assertTrue(Arrays.equals(FLAG, map.get(BKEY).getFlag()));
		Assert.assertTrue(Arrays.equals(FLAG2, map.get(BKEY2).getFlag()));
		Assert.assertTrue(Arrays.equals(FLAG3, map.get(BKEY3).getFlag()));

		// delete only the first 2 elements
		Assert.assertTrue(mc.asyncBopDelete(KEY, BKEY, BKEY2,
				ElementFlagFilter.DO_NOT_FILTER, 100, false).get());

		// get all again: only the third element remains
		map = mc.asyncBopGet(KEY, BKEY, BKEY3, ElementFlagFilter.DO_NOT_FILTER,
				0, 10, false, false).get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

		Assert.assertEquals(1, map.size());
		Assert.assertEquals(VALUE, map.get(BKEY3).getValue());
		Assert.assertTrue(Arrays.equals(FLAG3, map.get(BKEY3).getFlag()));
	}
}

// NOTE(review): the patch chunk continues with the header of
// src/test/manual/net/spy/memcached/collection/btree/longbkey/BopUpdateTest.java,
// which is truncated at the end of this chunk and not reconstructed here.
+ */ +package net.spy.memcached.collection.btree.longbkey; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.ElementFlagUpdate; + +public class BopUpdateTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final byte[] BKEY = new byte[] { (byte) 1 }; + private final String VALUE = "VALUE"; + private final String EFLAG = "EFLAG"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testNotExistsUpdateWithValue() { + try { + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(new byte[] { 0 }), VALUE).get()); + + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), VALUE).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testNotExistsUpdateWithoutValue() { + try { + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(new byte[] { 0 }), null).get()); + + Assert.assertFalse(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), null).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testExistsUpdateWithValue() { + try { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(new byte[] { 0 }), VALUE).get()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), VALUE).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void 
testExistsUpdateWithoutValue() { + try { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(new byte[] { 0 }), null).get()); + + Assert.assertTrue(mc.asyncBopUpdate(KEY, BKEY, + new ElementFlagUpdate(EFLAG.getBytes()), null).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + // + // + // + // with bitop + // + // + // + + public void testNotExistsUpdateUsingBitOpWithValue() { + try { + Assert.assertFalse(mc.asyncBopUpdate( + KEY, + BKEY, + new ElementFlagUpdate(0, BitWiseOperands.AND, EFLAG + .getBytes()), VALUE).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testNotExistsUpdateUsingBitOpWithoutValue() { + try { + Assert.assertFalse(mc.asyncBopUpdate( + KEY, + BKEY, + new ElementFlagUpdate(0, BitWiseOperands.AND, EFLAG + .getBytes()), null).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testExistsUpdateUsingBitOpWithValue() { + try { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, EFLAG.getBytes(), + VALUE, new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopUpdate( + KEY, + BKEY, + new ElementFlagUpdate(0, BitWiseOperands.AND, EFLAG + .getBytes()), VALUE).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testExistsUpdateUsingBitOpWithoutValue() { + try { + Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, EFLAG.getBytes(), + VALUE, new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncBopUpdate( + KEY, + BKEY, + new ElementFlagUpdate(0, BitWiseOperands.AND, EFLAG + .getBytes()), null).get()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopUpsertTest.java b/src/test/manual/net/spy/memcached/collection/btree/longbkey/BopUpsertTest.java new file mode 100644 index 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.btree.longbkey;

import java.util.Arrays;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.TimeUnit;

import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.ByteArrayBKey;
import net.spy.memcached.collection.CollectionAttributes;
import net.spy.memcached.collection.Element;
import net.spy.memcached.collection.ElementFlagFilter;

import org.junit.Assert;

/**
 * Integration tests for b+tree upsert (insert-or-replace) with byte-array
 * bkeys: upserting over an existing element replaces both value and eflag.
 *
 * Requires a running Arcus cluster; the client {@code mc} is provided by
 * {@link BaseIntegrationTest}.
 */
public class BopUpsertTest extends BaseIntegrationTest {

    private final String KEY = this.getClass().getSimpleName();
    private final byte[] BKEY = new byte[] { (byte) 1 };
    private final String VALUE = "VALUE";
    private final String VALUE2 = "EULAV";
    private final byte[] FLAG = "FLAG".getBytes();
    private final byte[] FLAG2 = "GALF".getBytes();

    @Override
    protected void setUp() throws Exception {
        super.setUp();
        mc.delete(KEY).get();
    }

    @Override
    protected void tearDown() throws Exception {
        mc.delete(KEY).get();
        super.tearDown();
    }

    public void testUpsertExistsValueOnly() throws Exception {
        // insert one element with an eflag
        Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE,
                new CollectionAttributes()).get());

        // get and verify original value/eflag
        Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(KEY, BKEY,
                BKEY, ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false)
                .get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

        Assert.assertEquals(1, map.size());

        for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
            Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
            Assert.assertEquals(VALUE, i.getValue().getValue());
            Assert.assertTrue(Arrays.equals(FLAG, i.getValue().getFlag()));
        }

        // upsert with a new value and a null eflag
        Assert.assertTrue(mc.asyncBopUpsert(KEY, BKEY, null, VALUE2, null)
                .get());

        // get again: value replaced, eflag cleared
        map = mc.asyncBopGet(KEY, BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER,
                0, 10, false, false).get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

        Assert.assertEquals(1, map.size());
        for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
            Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
            Assert.assertEquals(VALUE2, i.getValue().getValue());
            Assert.assertNull(i.getValue().getFlag());
        }
    }

    public void testUpsertNotExistsValueOnly() throws Exception {
        // upsert on a missing element behaves like an insert
        Assert.assertTrue(mc.asyncBopUpsert(KEY, BKEY, null, VALUE2,
                new CollectionAttributes()).get());

        // get and verify the inserted element
        Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(KEY, BKEY,
                BKEY, ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false)
                .get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

        Assert.assertEquals(1, map.size());
        for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
            Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
            Assert.assertEquals(VALUE2, i.getValue().getValue());
            Assert.assertNull(i.getValue().getFlag());
        }
    }

    public void testUpsertExistsEFlagOnly() throws Exception {
        // insert one element with an eflag
        Assert.assertTrue(mc.asyncBopInsert(KEY, BKEY, FLAG, VALUE,
                new CollectionAttributes()).get());

        // get and verify original value/eflag
        Map<ByteArrayBKey, Element<Object>> map = mc.asyncBopGet(KEY, BKEY,
                BKEY, ElementFlagFilter.DO_NOT_FILTER, 0, 10, false, false)
                .get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

        Assert.assertEquals(1, map.size());

        for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
            Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
            Assert.assertEquals(VALUE, i.getValue().getValue());
            Assert.assertTrue(Arrays.equals(FLAG, i.getValue().getFlag()));
        }

        // upsert with a new value and a new eflag
        Assert.assertTrue(mc.asyncBopUpsert(KEY, BKEY, FLAG2, VALUE2, null)
                .get());

        // get again: both value and eflag replaced
        map = mc.asyncBopGet(KEY, BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER,
                0, 10, false, false).get(Long.MAX_VALUE, TimeUnit.MILLISECONDS);

        Assert.assertEquals(1, map.size());
        for (Entry<ByteArrayBKey, Element<Object>> i : map.entrySet()) {
            Assert.assertTrue(Arrays.equals(BKEY, i.getKey().getBytes()));
            Assert.assertEquals(VALUE2, i.getValue().getValue());
            Assert.assertTrue(Arrays.equals(FLAG2, i.getValue().getFlag()));
        }
    }

}
+ */ +package net.spy.memcached.collection.list; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class LopBulkAPITest extends BaseIntegrationTest { + + private String key = "LopBulkAPITest33"; + List valueList = new ArrayList(); + + private int getValueCount() { + return mc.getMaxPipedItemCount(); + } + + protected void setUp() throws Exception { + super.setUp(); + for (long i = 0; i < getValueCount(); i++) { + valueList.add("value" + String.valueOf(i)); + } + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testBulk() throws Exception { + for (int i = 0; i < 10; i++) { + mc.asyncLopDelete(key, 0, 4000, true).get(1000, + TimeUnit.MILLISECONDS); + bulk(); + } + } + + public void bulk() { + try { + Future> future = mc + .asyncLopPipedInsertBulk(key, 0, valueList, + new CollectionAttributes()); + + Map map = future.get(10000, + TimeUnit.MILLISECONDS); + + List list = mc.asyncLopGet(key, 0, getValueCount(), false, + false).get(); + assertEquals(getValueCount(), list.size()); + assertEquals(0, map.size()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBulkFailed() { + try { + mc.asyncLopDelete(key, 0, 4000, true).get(1000, + TimeUnit.MILLISECONDS); + + mc.asyncLopInsert(key, 0, "value1", new CollectionAttributes()) + .get(); + + mc.asyncSetAttr(key, 0, 1L, CollectionOverflowAction.error).get(); + + CollectionFuture> future = mc + .asyncLopPipedInsertBulk(key, 0, valueList, + new CollectionAttributes()); + + Map map = future.get(10000, 
+ TimeUnit.MILLISECONDS); + + assertEquals(getValueCount(), map.size()); + assertFalse(future.getOperationStatus().isSuccess()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBulkEmptyList() { + try { + CollectionFuture> future = mc + .asyncLopPipedInsertBulk(key, 0, new ArrayList(0), + new CollectionAttributes()); + + future.get(10000, TimeUnit.MILLISECONDS); + Assert.fail(); + } catch (IllegalArgumentException e) { + return; + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + Assert.fail(); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopDeleteTest.java b/src/test/manual/net/spy/memcached/collection/list/LopDeleteTest.java new file mode 100644 index 000000000..051824f47 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopDeleteTest.java @@ -0,0 +1,85 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.list; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class LopDeleteTest extends BaseIntegrationTest { + + private String key = "LopDeleteTest"; + + private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L }; + + protected void setUp() throws Exception { + super.setUp(); + + deleteList(key, 1000); + addToList(key, items9); + + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + } + + protected void tearDown() throws Exception { + try { + deleteList(key, 1000); + super.tearDown(); + } catch (Exception e) { + } + } + + public void testLopDelete_NoKey() throws Exception { + assertFalse(mc.asyncLopDelete("no_key", 0, false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopDelete_OutOfRange() throws Exception { + assertFalse(mc.asyncLopDelete(key, 11, false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopDelete_DeleteByBestEffort() throws Exception { + // Delete items(2..11) in the list + assertTrue(mc.asyncLopDelete(key, 2, 11, false).get(1000, + TimeUnit.MILLISECONDS)); + + List rlist = mc.asyncLopGet(key, 0, 100, false, false).get( + 1000, TimeUnit.MILLISECONDS); + + // By rule of 'best effort', + // items(2..9) should be deleted + assertEquals(2, rlist.size()); + assertEquals(1L, rlist.get(0)); + assertEquals(2L, rlist.get(1)); + } + + public void testLopDelete_DeletedDropped() throws Exception { + // Delete all items in the list + assertTrue(mc.asyncLopDelete(key, 0, items9.length, true).get(1000, + TimeUnit.MILLISECONDS)); + + CollectionAttributes attrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertNull(attrs); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopGetTest.java 
/*
 * arcus-java-client : Arcus Java client
 * Copyright 2010-2014 NAVER Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package net.spy.memcached.collection.list;

import java.util.List;
import java.util.concurrent.TimeUnit;

import net.spy.memcached.collection.BaseIntegrationTest;
import net.spy.memcached.collection.CollectionAttributes;

/**
 * Integration tests for list element retrieval (lop get): missing key,
 * out-of-range index, best-effort range reads, and read-with-delete
 * semantics.
 *
 * Requires a running Arcus cluster; the client {@code mc} and the
 * {@code deleteList}/{@code addToList} helpers come from
 * {@link BaseIntegrationTest}.
 */
public class LopGetTest extends BaseIntegrationTest {

    private String key = "LopGetTest";

    private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L };

    protected void setUp() throws Exception {
        super.setUp();

        deleteList(key, 1000);
        addToList(key, items9);

        CollectionAttributes attrs = new CollectionAttributes();
        attrs.setMaxCount(10);
        assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS));
    }

    protected void tearDown() throws Exception {
        try {
            deleteList(key, 1000);
            super.tearDown();
        } catch (Exception e) {
            // best-effort cleanup; ignore failures during teardown
        }
    }

    public void testLopGet_NoKey() throws Exception {
        List<Object> rlist = mc.asyncLopGet("no_key", 0, false, false).get(
                1000, TimeUnit.MILLISECONDS);

        // a missing key yields null, not an empty list
        assertNull(rlist);
    }

    public void testLopGet_OutOfRange() throws Exception {
        // index beyond the list: empty (but non-null) result
        List<Object> list = mc.asyncLopGet(key, 20, false, false).get(1000,
                TimeUnit.MILLISECONDS);
        assertNotNull(list);
        assertTrue(list.isEmpty());
    }

    public void testLopGet_GetByBestEffort() throws Exception {
        // Retrieve items(2..11) in the list
        List<Object> rlist = mc.asyncLopGet(key, 2, 11, false, false).get(1000,
                TimeUnit.MILLISECONDS);

        // By rule of 'best effort',
        // items(2..9) should be retrieved
        assertEquals(7, rlist.size());
        for (int i = 0; i < rlist.size(); i++) {
            assertEquals(items9[i + 2], rlist.get(i));
        }
    }

    public void testLopGet_GetWithDeletion() throws Exception {
        CollectionAttributes attrs = null;
        List<Object> rlist = null;

        // Retrieve items(0..5) in the list with delete option
        rlist = mc.asyncLopGet(key, 0, 5, true, false).get(1000,
                TimeUnit.MILLISECONDS);

        assertEquals(6, rlist.size());

        // Check the remaining item count in the list
        attrs = mc.asyncGetAttr(key).get(1000, TimeUnit.MILLISECONDS);
        assertEquals(3, attrs.getCount().intValue());

        // Retrieve items(0..2) in the list with delete option and drop-if-empty
        rlist = mc.asyncLopGet(key, 0, 2, true, true).get(1000,
                TimeUnit.MILLISECONDS);

        assertEquals(3, rlist.size());

        // Now our list has no items and would be deleted
        rlist = mc.asyncLopGet(key, 0, 10, true, false).get(1000,
                TimeUnit.MILLISECONDS);
        assertNull(rlist);
    }

}
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.list; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.transcoders.LongTranscoder; + +public class LopInsertBoundary extends BaseIntegrationTest { + + private String key = "LopInsertBoundary"; + + private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L }; + + protected void setUp() throws Exception { + super.setUp(); + + deleteList(key, 1000); + addToList(key, items9); + + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + } + + protected void tearDown() { + try { + deleteList(key, 1000); + super.tearDown(); + } catch (Exception e) { + } + } + + public void testLopInsert_IndexOutOfRange() throws Exception { + assertFalse(mc.asyncLopInsert(key, 11, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopInsert_PrependTailTrim() throws Exception { + // Default overflowaction for lists is 'tail_trim' + + // Insert an item + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Prepend an item + assertTrue(mc.asyncLopInsert(key, 0, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new 
LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + assertEquals(11L, rlist.get(0).longValue()); + assertEquals(9L, rlist.get(9).longValue()); // tail_trimmed when + // prepending + } + + public void testLopInsert_PrependHeadTrim() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Prepend an item + assertTrue(mc.asyncLopInsert(key, 0, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + assertEquals(11L, rlist.get(0).longValue()); + assertEquals(9L, rlist.get(9).longValue()); // tail_trimmed when + // prepending + } + + public void testLopInsert_PrependOverflowError() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + mc.asyncLopInsert(key, 9, 10L, null).get(1000, TimeUnit.MILLISECONDS); + + // Prepend an item (FAILED) + assertFalse(mc.asyncLopInsert(key, 0, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopInsert_AppendTailTrim() throws Exception { + // Default overflowaction for lists is 'tail_trim' + + // Insert an item + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Prepend an item + assertTrue(mc.asyncLopInsert(key, -1, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + 
assertEquals(2L, rlist.get(0).longValue()); // head_trimmed when + // appending + assertEquals(11L, rlist.get(9).longValue()); + } + + public void testLopInsert_AppendHeadTrim() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Prepend an item + assertTrue(mc.asyncLopInsert(key, -1, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + assertEquals(2L, rlist.get(0).longValue()); // head_trimmed when + // appending + assertEquals(11L, rlist.get(9).longValue()); + } + + public void testLopInsert_AppendOverflowError() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + mc.asyncLopInsert(key, 9, 10L, null).get(1000, TimeUnit.MILLISECONDS); + + // Prepend an item (FAILED) + assertFalse(mc.asyncLopInsert(key, -1, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopInsert_InsertTailTrim() throws Exception { + // Default overflowaction for lists is 'tail_trim' + + // Insert an item + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Insert an item + assertTrue(mc.asyncLopInsert(key, 5, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + assertEquals(1L, rlist.get(0).longValue()); + assertEquals(9L, rlist.get(9).longValue()); // 
tail_trimmed + } + + public void testLopInsert_InsertHeadTrim() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + assertTrue(mc.asyncLopInsert(key, 9, 10L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Insert an item + assertTrue(mc.asyncLopInsert(key, 5, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + + // Check if the last item prepended, and the last one discarded + List rlist = mc.asyncLopGet(key, 0, 100, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + + assertEquals(10, rlist.size()); + assertEquals(2L, rlist.get(0).longValue()); // head_trimmed + assertEquals(10L, rlist.get(9).longValue()); + } + + public void testLopInsert_InsertOverflowError() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item to make the list full + mc.asyncLopInsert(key, 9, 10L, null).get(1000, TimeUnit.MILLISECONDS); + + // Prepend an item (FAILED) + assertFalse(mc.asyncLopInsert(key, 0, 11L, null).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopInsert_SetMaxCountUnderCurrentSize() throws Exception { + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopInsertDataType.java b/src/test/manual/net/spy/memcached/collection/list/LopInsertDataType.java new file mode 100644 index 000000000..cd7b3983c --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopInsertDataType.java @@ -0,0 +1,120 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.list; + +import java.util.Date; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class LopInsertDataType extends BaseIntegrationTest { + + private String key = "LopInsertDataType"; + private Random rand = new Random(new Date().getTime()); + + protected void tearDown() { + try { + deleteList(key, 1000); + super.tearDown(); + } catch (Exception e) { + } + } + + public void testLopInsert_ElementCountLimit() throws Exception { + byte[] tooBigByte = new byte[1024 * 1024]; + for (int i = 0; i < tooBigByte.length; i++) { + tooBigByte[i] = (byte) rand.nextInt(255); + } + + try { + mc.asyncLopInsert(key, 0, new String(tooBigByte), + new CollectionAttributes()) + .get(1000, TimeUnit.MILLISECONDS); + fail(); + } catch (Exception e) { + e.printStackTrace(); + assertTrue(e.getMessage().contains("Cannot cache data larger than")); + } + } + + public void testLopInsert_SameDataType() throws Exception { + // First, create a list and insert one item in it + assertTrue(mc.asyncLopInsert(key, 0, "a string", + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS)); + + // Then insert an another item with same data type + assertTrue(mc.asyncLopInsert(key, 1, "an another string", null).get( + 1000, TimeUnit.MILLISECONDS)); + + // Retrieve items from the list and check they're in same data type + List rlist = mc.asyncLopGet(key, 0, 10, false, 
false).get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(2, rlist.size()); + assertEquals(rlist.get(0).getClass(), rlist.get(1).getClass()); + for (Object each : rlist) { + assertEquals(rlist.get(0).getClass(), each.getClass()); + } + } + + public void testLopInsert_DifferentDataType() throws Exception { + // First, create a list and insert one item in it + assertTrue(mc.asyncLopInsert(key, -1, "a string", + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS)); + + // Then insert another items with different data types + assertTrue(mc.asyncLopInsert(key, -1, new Integer(100), null).get(1000, + TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncLopInsert(key, -1, new Long(101L), null).get(1000, + TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncLopInsert(key, -1, new Character('f'), null).get( + 1000, TimeUnit.MILLISECONDS)); + + // Retrieve items from the list and check they're in same data type + List rlist = mc.asyncLopGet(key, 0, 10, false, false).get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(4, rlist.size()); + for (Object each : rlist) { + assertEquals(rlist.get(0).getClass(), each.getClass()); + } + } + + public void testLopInsert_DifferentDataType_ErrorCase() throws Exception { + // First, create a list and insert one item in it + assertTrue(mc.asyncLopInsert(key, 0, new Character('a'), + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS)); + + // Then insert an another item with different data type + assertTrue(mc.asyncLopInsert(key, 1, "a string", null).get(1000, + TimeUnit.MILLISECONDS)); + + // Retrieve items from the list and check they're in same data type + List rlist = mc.asyncLopGet(key, 0, 10, false, false).get(1000, + TimeUnit.MILLISECONDS); + + assertEquals(2, rlist.size()); + assertNotNull(rlist.get(0)); + assertNull(rlist.get(1)); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyExists.java b/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyExists.java new file 
mode 100644 index 000000000..4389b119d --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyExists.java @@ -0,0 +1,92 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.list; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.transcoders.LongTranscoder; + +public class LopInsertWhenKeyExists extends BaseIntegrationTest { + + private String key = "LopInsertWhenKeyExists"; + + private Long[] items8 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L }; + private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L }; + + protected void tearDown() { + try { + deleteList(key, 1000); + super.tearDown(); + } catch (Exception e) { + } + } + + public void testLopInsert_Normal() throws Exception { + // Create a list and add it 9 items + addToList(key, items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert one item at index 1 + assertTrue(mc.asyncLopInsert(key, 1, 10L, new CollectionAttributes()) + .get(1000, TimeUnit.MILLISECONDS)); + + // Check inserted item + List rlist = mc.asyncLopGet(key, 0, 10, false, false, + new 
LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10L, rlist.get(1).longValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + } + + public void testLopInsert_SameItem() throws Exception { + // Create a list and add it 8 items + addToList(key, items8); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert 2 items at index 1 + assertTrue(mc.asyncLopInsert(key, 1, 9L, new CollectionAttributes()) + .get(1000, TimeUnit.MILLISECONDS)); + assertTrue(mc.asyncLopInsert(key, 1, 10L, new CollectionAttributes()) + .get(1000, TimeUnit.MILLISECONDS)); + + // Check inserted items + List rlist = mc.asyncLopGet(key, 0, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10L, rlist.get(1).longValue()); + assertEquals(9L, rlist.get(2).longValue()); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyNotExist.java b/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyNotExist.java new file mode 100644 index 000000000..9e4d16ab7 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopInsertWhenKeyNotExist.java @@ -0,0 +1,149 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.list; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class LopInsertWhenKeyNotExist extends BaseIntegrationTest { + + private String key = "LopInsertWhenKeyNotExist"; + + protected void tearDown() { + try { + deleteList(key, 1000); + super.tearDown(); + } catch (Exception e) { + } + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * -1	true	false	null
+	 * 
+ */ + public void testLopInsert_nokey_01() throws Exception { + insertToFail(key, -1, true, null); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * -1	false	true	not null
+	 * 
+ */ + public void testLopInsert_nokey_02() throws Exception { + boolean success = insertToSucceed(key, -1, false, "some value"); + + assertFalse(success); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * 0	false	true	not null
+	 * 
+ */ + public void testLopInsert_nokey_03() throws Exception { + assertFalse(insertToSucceed(key, 0, false, "some value")); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * 0	false	false	not null
+	 * 
+ */ + public void testLopInsert_nokey_04() throws Exception { + assertFalse(insertToSucceed(key, 0, false, "some value")); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * 0	true	true	not null
+	 * 
+ */ + public void testLopInsert_nokey_05() throws Exception { + assertTrue(insertToSucceed(key, 0, true, "some value")); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * -1	true	false	not null
+	 * 
+ */ + public void testLopInsert_nokey_06() throws Exception { + assertTrue(insertToSucceed(key, -1, true, "some value")); + } + + /** + *
+	 * INDEX	CREATE	FIXED	VALUE
+	 * count	true	true	not null
+	 * 
+ */ + public void testLopInsert_nokey_07() throws Exception { + // Prepare 3 items + String[] items = { "item01", "item02", "item03" }; + for (String item : items) { + assertTrue(insertToSucceed(key, -1, true, item)); + } + + assertTrue(insertToSucceed(key, items.length, true, "item04")); + } + + boolean insertToFail(String key, int index, boolean createKeyIfNotExists, + Object value) { + boolean result = false; + try { + result = mc + .asyncLopInsert( + key, + index, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + fail("should be failed"); + } catch (Exception e) { + } + return result; + } + + boolean insertToSucceed(String key, int index, + boolean createKeyIfNotExists, Object value) { + boolean result = false; + try { + result = mc + .asyncLopInsert( + key, + index, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + e.printStackTrace(); + fail("should not be failed"); + } + return result; + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopOverflowActionTest.java b/src/test/manual/net/spy/memcached/collection/list/LopOverflowActionTest.java new file mode 100644 index 000000000..33952beec --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopOverflowActionTest.java @@ -0,0 +1,217 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.list; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; + +public class LopOverflowActionTest extends BaseIntegrationTest { + + private String key = "LopOverflowActionTest"; + + protected void setUp() { + try { + super.setUp(); + mc.delete(key).get(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testLopGet_Maxcount() throws Exception { + // Test + for (int maxcount = 100; maxcount <= 200; maxcount += 100) { + // Create a list + mc.asyncLopInsert(key, 0, "item0", new CollectionAttributes()); + + // Set maxcount + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, + TimeUnit.MILLISECONDS)); + + for (int i = 1; i < maxcount; i++) { + assertTrue(mc.asyncLopInsert(key, i, "item" + i, null).get( + 1000, TimeUnit.MILLISECONDS)); + } + + List result = mc.asyncLopGet(key, 0, maxcount + 10, false, + false).get(10000, TimeUnit.MILLISECONDS); + assertEquals(maxcount, result.size()); + assertTrue(mc.asyncLopDelete(key, 0, 20000, true).get(1000, + TimeUnit.MILLISECONDS)); + } + } + + public void testLopGet_Overflow() throws Exception { + // Create a List + mc.asyncLopInsert(key, 0, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 100 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.error); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = 1; i <= maxcount + 10; i++) { + mc.asyncLopInsert(key, -1, "item" + i, null).get(1000, + 
TimeUnit.MILLISECONDS); + + } + + List result = mc.asyncLopGet(key, 0, maxcount + 10, false, + false).get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(maxcount, result.size()); + assertEquals("item0", result.get(0)); + assertEquals("item99", result.get(result.size() - 1)); + assertTrue(mc.asyncLopDelete(key, 0, 20000, true).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopGet_HeadTrim() throws Exception { + // Create a List + mc.asyncLopInsert(key, 0, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 10000 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.head_trim); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = 1; i <= maxcount + 10; i++) { + assertTrue(mc.asyncLopInsert(key, -1, "item" + i, null).get(1000, + TimeUnit.MILLISECONDS)); + } + + List result = mc.asyncLopGet(key, 0, maxcount + 10, false, + false).get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(maxcount, result.size()); + assertEquals("item11", result.get(0)); + assertEquals("item110", result.get(result.size() - 1)); + assertTrue(mc.asyncLopDelete(key, 0, 20000, true).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopGet_TailTrim() throws Exception { + // Create a List + mc.asyncLopInsert(key, 0, "item0", new CollectionAttributes()); + + int maxcount = 100; + + // Set maxcount to 10000 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + attrs.setOverflowAction(CollectionOverflowAction.tail_trim); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert more than maxcount + for (int i = 1; i <= maxcount + 10; i++) { + assertTrue(mc.asyncLopInsert(key, 0, "item" + i, null).get(1000, + TimeUnit.MILLISECONDS)); + } + 
+ List result = mc.asyncLopGet(key, 0, maxcount + 10, false, + false).get(10000, TimeUnit.MILLISECONDS); + + // result size should be maxsize(10000) + assertEquals(maxcount, result.size()); + assertEquals("item110", result.get(0)); + assertEquals("item11", result.get(result.size() - 1)); + assertTrue(mc.asyncLopDelete(key, 0, 20000, false).get(1000, + TimeUnit.MILLISECONDS)); + } + + public void testLopGet_HeadTrim_OutOfRange() throws Exception { + // Create a set + mc.asyncLopInsert(key, 1, "item1", new CollectionAttributes()); + + // head_trim + assertFalse(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // test + assertFalse(mc.asyncLopInsert(key, 0, "item0", null).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncLopDelete(key, 0, 10, false).get(1000, TimeUnit.MILLISECONDS); + } + + public void testLopGet_TailTrim_OutOfRange() throws Exception { + // Create a set + mc.asyncLopInsert(key, 1, "item1", new CollectionAttributes()); + + // tail_trim + assertFalse(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.tail_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // test + assertFalse(mc.asyncLopInsert(key, 2, "item2", null).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncLopDelete(key, 0, 10, false).get(1000, TimeUnit.MILLISECONDS); + } + + public void testLopGet_AvailableOverflowAction() throws Exception { + // Create a set + mc.asyncLopInsert(key, 0, "item0", new CollectionAttributes()); + + // Set OverflowAction + // error + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // head_trim + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // tail_trim + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.tail_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // smallest_trim + assertFalse(mc.asyncSetAttr(key, null, null, + 
CollectionOverflowAction.smallest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // largest_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.largest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncLopDelete(key, 0, true).get(1000, TimeUnit.MILLISECONDS); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/list/LopServerMessageTest.java b/src/test/manual/net/spy/memcached/collection/list/LopServerMessageTest.java new file mode 100644 index 000000000..a43c1318a --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/list/LopServerMessageTest.java @@ -0,0 +1,199 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.list; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.OperationStatus; + +public class LopServerMessageTest extends BaseIntegrationTest { + + private String key = "LopServerMessageTest"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(key).get(); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testNotFound() throws Exception { + CollectionFuture> future = (CollectionFuture>) mc + .asyncLopGet(key, 0, false, false); + assertNull(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("NOT_FOUND", status.getMessage()); + } + + public void testCreatedStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("CREATED_STORED", status.getMessage()); + } + + public void testStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncLopInsert(key, 1, 1, + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("STORED", status.getMessage()); + } + + public void testOutOfRange() throws Exception { + CollectionFuture future = (CollectionFuture) mc + 
.asyncLopInsert(key, 1, 1, new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OUT_OF_RANGE", status.getMessage()); + } + + public void testOutOfRange2() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncLopInsert(key, 1, 1, + new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OUT_OF_RANGE", status.getMessage()); + } + + public void testOverflowed() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, 0, new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncSetAttr(key, null, 2L, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncLopInsert(key, 0, 1, + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncLopInsert(key, 0, 1, + new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OVERFLOWED", status.getMessage()); + } + + public void testDeletedDropped() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + future = (CollectionFuture) mc.asyncLopDelete(key, 0, true); + assertTrue(future.get(1000, 
TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeleted() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncLopInsert(key, -1, "bbb", + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + future = (CollectionFuture) mc.asyncLopDelete(key, 0, false); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + + public void testDeletedDroppedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture> future2 = (CollectionFuture>) mc + .asyncLopGet(key, 0, true, true); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeletedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncLopInsert(key, 0, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncLopInsert(key, -1, "bbb", + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture> future2 = (CollectionFuture>) mc + .asyncLopGet(key, 0, true, false); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = 
future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopBulkAPITest.java b/src/test/manual/net/spy/memcached/collection/set/SopBulkAPITest.java new file mode 100644 index 000000000..75b7439c0 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopBulkAPITest.java @@ -0,0 +1,123 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class SopBulkAPITest extends BaseIntegrationTest { + + private String key = "SopBulkAPITest"; + List valueList = new ArrayList(); + + private int getValueCount() { + return mc.getMaxPipedItemCount(); + } + + protected void setUp() throws Exception { + super.setUp(); + for (long i = 0; i < getValueCount(); i++) { + valueList.add("value" + String.valueOf(i)); + } + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testBulk() throws Exception { + for (int i = 0; i < 10; i++) { + mc.delete(key).get(); + bulk(); + } + } + + public void bulk() { + try { + Future> future = mc + .asyncSopPipedInsertBulk(key, valueList, + new CollectionAttributes()); + + Map map = future.get(10000, + TimeUnit.MILLISECONDS); + + Set set = mc + .asyncSopGet(key, getValueCount(), false, false).get(); + + assertEquals(getValueCount(), set.size()); + assertEquals(0, map.size()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBulkFailed() { + try { + for (Object v : valueList) { + mc.asyncSopDelete(key, v, false).get(); + } + + mc.asyncSopInsert(key, "value1", new CollectionAttributes()).get(); + + CollectionFuture> future = mc + .asyncSopPipedInsertBulk(key, valueList, + new CollectionAttributes()); + + Map map = future.get(10000, + TimeUnit.MILLISECONDS); + + assertEquals(1, map.size()); + assertFalse(future.getOperationStatus().isSuccess()); + } catch (Exception e) { + e.printStackTrace(); + 
Assert.fail(e.getMessage()); + } + } + + public void testBulkEmptySet() { + try { + for (Object v : valueList) { + mc.asyncSopDelete(key, v, false).get(); + } + + CollectionFuture> future = mc + .asyncSopPipedInsertBulk(key, new ArrayList(), + new CollectionAttributes()); + + future.get(10000, TimeUnit.MILLISECONDS); + + Assert.fail(); + } catch (IllegalArgumentException e) { + return; + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + Assert.fail(); + } +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopDeleteTest.java b/src/test/manual/net/spy/memcached/collection/set/SopDeleteTest.java new file mode 100644 index 000000000..160a669ef --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopDeleteTest.java @@ -0,0 +1,67 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.concurrent.ExecutionException; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class SopDeleteTest extends BaseIntegrationTest { + + private static final String KEY = SopDeleteTest.class.getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testSopDelete0() throws InterruptedException, + ExecutionException { + testSopDelete(0); + } + + public void testSopDelete1() throws InterruptedException, + ExecutionException { + testSopDelete(1); + } + + public void testSopDelete(Object element) throws InterruptedException, + ExecutionException { + + Assert.assertNull(mc.asyncSopGet(KEY, 100, false, true).get()); + + Assert.assertTrue(mc.asyncSopInsert(KEY, element, + new CollectionAttributes()).get()); + + Assert.assertNotNull(mc.asyncSopGet(KEY, 100, false, true).get()); + + Assert.assertTrue(mc.asyncSopDelete(KEY, element, true).get()); + + Assert.assertNull(mc.asyncSopGet(KEY, 100, false, true).get()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopExistTest.java b/src/test/manual/net/spy/memcached/collection/set/SopExistTest.java new file mode 100644 index 000000000..f6229a6c6 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopExistTest.java @@ -0,0 +1,78 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.set; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.internal.CollectionFuture; + +public class SopExistTest extends BaseIntegrationTest { + + String key = "SopExistTest"; + String value = "value"; + + protected void setUp() throws Exception { + super.setUp(); + mc.delete(key).get(); + } + + public void testExist() throws Exception { + Boolean result = mc.asyncSopInsert(key, value, + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS); + assertTrue(result); + + CollectionFuture future = mc.asyncSopExist(key, value); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + assertEquals(CollectionResponse.EXIST, future.getOperationStatus() + .getResponse()); + } + + public void testNotExist() throws Exception { + Boolean result = mc.asyncSopInsert(key, value, + new CollectionAttributes()).get(1000, TimeUnit.MILLISECONDS); + assertTrue(result); + + CollectionFuture future = mc.asyncSopExist(key, "dummy"); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + assertEquals(CollectionResponse.NOT_EXIST, future.getOperationStatus() + .getResponse()); + } + + public void testNotFound() throws Exception { + CollectionFuture future = mc.asyncSopExist(key, value); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + assertEquals(CollectionResponse.NOT_FOUND, future.getOperationStatus() + .getResponse()); + } 
+ + public void testUnreadabled() throws Exception { + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setReadable(false); + + Boolean result = mc.asyncSopInsert(key, value, attrs).get(1000, + TimeUnit.MILLISECONDS); + assertTrue(result); + + CollectionFuture future = mc.asyncSopExist(key, "dummy"); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + assertEquals(CollectionResponse.UNREADABLE, future.getOperationStatus() + .getResponse()); + } +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyExists.java b/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyExists.java new file mode 100644 index 000000000..3e20c4a56 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyExists.java @@ -0,0 +1,86 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.transcoders.LongTranscoder; + +public class SopInsertWhenKeyExists extends BaseIntegrationTest { + + private String key = "SopInsertWhenKeyExists"; + + private Long[] items9 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L }; + private Long[] items10 = { 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L }; + + protected void tearDown() { + try { + deleteSet(key, items10); + super.tearDown(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testSopInsert_Normal() throws Exception { + // Create a list and add it 9 items + addToSet(key, items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert one item + assertTrue(mc.asyncSopInsert(key, 10L, new CollectionAttributes()).get( + 1000, TimeUnit.MILLISECONDS)); + + // Check inserted item + Set rlist = mc.asyncSopGet(key, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(10, rlist.size()); + assertTrue(rlist.contains(10L)); + + // Check list attributes + CollectionAttributes rattrs = mc.asyncGetAttr(key).get(1000, + TimeUnit.MILLISECONDS); + assertEquals(10, rattrs.getCount().intValue()); + } + + public void testSopInsert_SameItem() throws Exception { + // Create a list and add it 9 items + addToSet(key, items9); + + // Set maxcount to 10 + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(10); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, TimeUnit.MILLISECONDS)); + + // Insert an item same to the last item + mc.asyncSopInsert(key, 9L, new CollectionAttributes()).get(1000, + TimeUnit.MILLISECONDS); + + // Check that item was not inserted + Set 
rlist = mc.asyncSopGet(key, 10, false, false, + new LongTranscoder()).get(1000, TimeUnit.MILLISECONDS); + assertEquals(9, rlist.size()); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyNotExist.java b/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyNotExist.java new file mode 100644 index 000000000..ca61ae38d --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopInsertWhenKeyNotExist.java @@ -0,0 +1,109 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.collection.set; + +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class SopInsertWhenKeyNotExist extends BaseIntegrationTest { + + private String key = "SopInsertWhenKeyNotExist"; + + protected void tearDown() { + try { + mc.delete(key).get(); + super.tearDown(); + } catch (Exception e) { + } + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * true	false	null
+	 * 
+ */ + public void testSopInsert_nokey_01() throws Exception { + insertToFail(key, true, false, null); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * false	true	not null
+	 * 
+ */ + public void testSopInsert_nokey_02() throws Exception { + assertFalse(insertToSucceed(key, false, true, "some value")); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * false	false	not null
+	 * 
+ */ + public void testSopInsert_nokey_04() throws Exception { + assertFalse(insertToSucceed(key, false, false, "some value")); + } + + /** + *
+	 * CREATE	FIXED	VALUE
+	 * true	true	not null
+	 * 
+ */ + public void testSopInsert_nokey_05() throws Exception { + assertTrue(insertToSucceed(key, true, true, "some value")); + } + + boolean insertToFail(String key, boolean createKeyIfNotExists, + boolean fixed, Object value) { + boolean result = false; + try { + result = mc + .asyncSopInsert( + key, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + fail("should be failed"); + } catch (Exception e) { + } + return result; + } + + boolean insertToSucceed(String key, boolean createKeyIfNotExists, + boolean fixed, Object value) { + boolean result = false; + try { + result = mc + .asyncSopInsert( + key, + value, + ((createKeyIfNotExists) ? new CollectionAttributes() + : null)).get(1000, TimeUnit.MILLISECONDS); + } catch (Exception e) { + e.printStackTrace(); + fail("should not be failed"); + } + return result; + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopOverflowActionTest.java b/src/test/manual/net/spy/memcached/collection/set/SopOverflowActionTest.java new file mode 100644 index 000000000..50478deb7 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopOverflowActionTest.java @@ -0,0 +1,105 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; + +public class SopOverflowActionTest extends BaseIntegrationTest { + + private String key = "SopOverflowActionTest"; + + protected void setUp() { + try { + super.setUp(); + } catch (Exception e) { + e.printStackTrace(); + } + } + + public void testSopGet_Maxcount() throws Exception { + // Test + for (int maxcount = 1000; maxcount <= 10000; maxcount += 1000) { + // Create a B+ Tree + mc.asyncSopInsert(key, "item0", new CollectionAttributes()); + + // Set maxcount + CollectionAttributes attrs = new CollectionAttributes(); + attrs.setMaxCount(maxcount); + assertTrue(mc.asyncSetAttr(key, attrs).get(1000, + TimeUnit.MILLISECONDS)); + + for (int i = 1; i <= maxcount + 1000; i++) { + boolean success = mc.asyncSopInsert(key, "item" + i, + new CollectionAttributes()).get(1000, + TimeUnit.MILLISECONDS); + if (i >= maxcount) { + assertFalse(success); + } + } + + Set result = mc.asyncSopGet(key, maxcount + 1000, false, + false).get(10000, TimeUnit.MILLISECONDS); + + assertEquals(maxcount, result.size()); + assertFalse(result.contains("item" + maxcount)); + + for (int i = 0; i <= maxcount; i++) { + mc.asyncSopDelete(key, "item" + i, false).get(1000, + TimeUnit.MILLISECONDS); + } + } + } + + public void testSopGet_AvailableOverflowAction() throws Exception { + // Create a set + mc.asyncSopInsert(key, "item0", new CollectionAttributes()); + + // Set OverflowAction + // error + assertTrue(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + // head_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.head_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // tail_trim + assertFalse(mc.asyncSetAttr(key, null, null, 
+ CollectionOverflowAction.tail_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // smallest_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.smallest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + // largest_trim + assertFalse(mc.asyncSetAttr(key, null, null, + CollectionOverflowAction.largest_trim).get(1000, + TimeUnit.MILLISECONDS)); + + mc.asyncSopDelete(key, "item0", false).get(1000, TimeUnit.MILLISECONDS); + } + +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopPipedExistTest.java b/src/test/manual/net/spy/memcached/collection/set/SopPipedExistTest.java new file mode 100644 index 000000000..13a1bea13 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopPipedExistTest.java @@ -0,0 +1,258 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.internal.CollectionFuture; + +public class SopPipedExistTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final String VALUE1 = "VALUE1"; + private final String VALUE2 = "VALUE2"; + private final String VALUE3 = "VALUE3"; + private final String VALUE4 = "VALUE4"; + private final String VALUE5 = "VALUE5"; + private final String VALUE6 = "VALUE6"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + }; + + public void testPipedExist() { + try { + Assert.assertTrue(mc.asyncSopCreate(KEY, ElementValueType.STRING, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE1, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE2, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE3, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE1).get()); + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE2).get()); + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE3).get()); + Assert.assertFalse(mc.asyncSopExist(KEY, VALUE4).get()); + + List findValues = new ArrayList(); + findValues.add(VALUE1); + findValues.add(VALUE4); + findValues.add(VALUE2); + findValues.add(VALUE6); + findValues.add(VALUE3); + findValues.add(VALUE5); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + 
Assert.assertTrue(future.getOperationStatus().isSuccess()); + + Assert.assertTrue(map.get(VALUE1)); + Assert.assertTrue(map.get(VALUE2)); + Assert.assertTrue(map.get(VALUE3)); + Assert.assertFalse(map.get(VALUE4)); + Assert.assertFalse(map.get(VALUE5)); + Assert.assertFalse(map.get(VALUE6)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testPipedExistWithOneValue() { + try { + Assert.assertTrue(mc.asyncSopCreate(KEY, ElementValueType.STRING, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE1, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE2, + new CollectionAttributes()).get()); + Assert.assertTrue(mc.asyncSopInsert(KEY, VALUE3, + new CollectionAttributes()).get()); + + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE1).get()); + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE2).get()); + Assert.assertTrue(mc.asyncSopExist(KEY, VALUE3).get()); + Assert.assertFalse(mc.asyncSopExist(KEY, VALUE4).get()); + + List findValues = new ArrayList(); + findValues.add(VALUE1); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(future.getOperationStatus().isSuccess()); + + Assert.assertTrue(map.get(VALUE1)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testMaxPipedExist() { + try { + List findValues = new ArrayList(); + + // insert items + for (int i = 0; i < mc.getMaxPipedItemCount(); i++) { + findValues.add("VALUE" + i); + + if (i % 2 == 0) + continue; + Assert.assertTrue(mc.asyncSopInsert(KEY, "VALUE" + i, + new CollectionAttributes()).get()); + } + + // exist bulk + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(future.getOperationStatus().isSuccess()); + + for (int i = 0; i < mc.getMaxPipedItemCount(); i++) { + if (i % 2 == 0) { + 
Assert.assertFalse(map.get("VALUE" + i)); + } else { + Assert.assertTrue(map.get("VALUE" + i)); + } + } + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testPipedExistNotExistsKey() { + try { + List findValues = new ArrayList(); + findValues.add(VALUE1); + findValues.add(VALUE4); + findValues.add(VALUE2); + findValues.add(VALUE6); + findValues.add(VALUE3); + findValues.add(VALUE5); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(map.isEmpty()); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.NOT_FOUND, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testPipedExistOneNotExistsKey() { + try { + List findValues = new ArrayList(); + findValues.add(VALUE1); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(map.isEmpty()); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.NOT_FOUND, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testPipedExistTypeMismatchedKey() { + try { + Assert.assertTrue(mc.set(KEY, 10, VALUE1).get()); + + List findValues = new ArrayList(); + findValues.add(VALUE1); + findValues.add(VALUE2); + findValues.add(VALUE6); + findValues.add(VALUE3); + findValues.add(VALUE5); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(map.isEmpty()); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.TYPE_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + 
Assert.fail(e.getMessage()); + } + } + + public void testPipedExistOneTypeMismatchedKey() { + try { + Assert.assertTrue(mc.set(KEY, 10, VALUE1).get()); + + List findValues = new ArrayList(); + findValues.add(VALUE1); + + CollectionFuture> future = mc + .asyncSopPipedExistBulk(KEY, findValues); + + Map map = future.get(); + + Assert.assertTrue(map.isEmpty()); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.TYPE_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/collection/set/SopServerMessageTest.java b/src/test/manual/net/spy/memcached/collection/set/SopServerMessageTest.java new file mode 100644 index 000000000..c1f23c8e1 --- /dev/null +++ b/src/test/manual/net/spy/memcached/collection/set/SopServerMessageTest.java @@ -0,0 +1,184 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.collection.set; + +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionOverflowAction; +import net.spy.memcached.internal.CollectionFuture; +import net.spy.memcached.ops.OperationStatus; + +public class SopServerMessageTest extends BaseIntegrationTest { + + private String key = "SopServerMessageTest"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.asyncSopDelete(key, "aaa", true); + mc.asyncSopDelete(key, "bbbb", true); + } + + @Override + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testNotFound() throws Exception { + CollectionFuture> future = (CollectionFuture>) mc + .asyncSopGet(key, 1, false, false); + assertNull(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("NOT_FOUND", status.getMessage()); + } + + public void testCreatedStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("CREATED_STORED", status.getMessage()); + } + + public void testStored() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncSopInsert(key, "bbbb", + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("STORED", status.getMessage()); + } + + public void testOverflowed() throws Exception { + 
CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + assertTrue(mc.asyncSetAttr(key, null, 1L, + CollectionOverflowAction.error) + .get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncSopInsert(key, "bbbb", + new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("OVERFLOWED", status.getMessage()); + } + + public void testElementExists() throws Exception { + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + future = (CollectionFuture) mc.asyncSopInsert(key, "aaa", + new CollectionAttributes()); + assertFalse(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("ELEMENT_EXISTS", status.getMessage()); + } + + public void testDeletedDropped() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + future = (CollectionFuture) mc + .asyncSopDelete(key, "aaa", true); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeleted() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncSopInsert(key, "bbbb", + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // delete + 
future = (CollectionFuture) mc.asyncSopDelete(key, "aaa", + false); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + + public void testDeletedDroppedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture> future2 = (CollectionFuture>) mc + .asyncSopGet(key, 1, true, true); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED_DROPPED", status.getMessage()); + } + + public void testDeletedAfterRetrieval() throws Exception { + // create + CollectionFuture future = (CollectionFuture) mc + .asyncSopInsert(key, "aaa", new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // insert + future = (CollectionFuture) mc.asyncSopInsert(key, "bbbb", + new CollectionAttributes()); + assertTrue(future.get(1000, TimeUnit.MILLISECONDS)); + + // get + CollectionFuture> future2 = (CollectionFuture>) mc + .asyncSopGet(key, 1, true, false); + assertNotNull(future2.get(1000, TimeUnit.MILLISECONDS)); + + OperationStatus status = future2.getOperationStatus(); + assertNotNull(status); + assertEquals("DELETED", status.getMessage()); + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/BTreeDeleteWithFilterTest.java b/src/test/manual/net/spy/memcached/emptycollection/BTreeDeleteWithFilterTest.java new file mode 100644 index 000000000..2ee549cf8 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/BTreeDeleteWithFilterTest.java @@ -0,0 +1,97 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class BTreeDeleteWithFilterTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + private final int VALUE = 1234567890; + + private final String FLAG = "flag"; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + }; + + public void testDeleteWithMatchedFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, FLAG.getBytes()); + + boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + FLAG.getBytes(), VALUE, new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // delete one bkey + Boolean delete = mc.asyncBopDelete(KEY, BKEY, filter, false).get(); + Assert.assertTrue(delete); + + // check attr again + Assert.assertEquals(new Long(0), mc.asyncGetAttr(KEY).get() + .getCount()); + } catch (Exception e) { 
+ e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDeleteWithUnMatchedFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "aa".getBytes()); + + boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + FLAG.getBytes(), VALUE, new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // delete one bkey + Boolean delete = mc.asyncBopDelete(KEY, BKEY, filter, false).get(); + Assert.assertFalse(delete); + + // check attr again + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/BTreeGetWithFilterTest.java b/src/test/manual/net/spy/memcached/emptycollection/BTreeGetWithFilterTest.java new file mode 100644 index 000000000..8e26acdb7 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/BTreeGetWithFilterTest.java @@ -0,0 +1,195 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.BitWiseOperands; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; + +public class BTreeGetWithFilterTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + private final int VALUE = 1234567890; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + + boolean insertResult = mc.asyncBopInsert(KEY, BKEY, "flag".getBytes(), + VALUE, new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + }; + + public void testGetWithDeleteAndWithoutDropWithFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "flag".getBytes()); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, filter, true, false).get() + .get(BKEY).getValue()); + + // check exists empty btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + Assert.assertEquals(new Long(0), attr.getCount()); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNotNull(map); + Assert.assertTrue(map.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithDeleteAndWithDropWithFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + 
CompOperands.Equal, "flag".getBytes()); + filter.setBitOperand(BitWiseOperands.AND, "flag".getBytes()); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, filter, true, true).get() + .get(BKEY).getValue()); + + // check btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(attr); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testRangedGetWithtDeleteAndWithoutDropWithFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "flag".getBytes()); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals(VALUE, + mc.asyncBopGet(KEY, BKEY, BKEY, filter, 0, 1, true, false) + .get().get(BKEY).getValue()); + + // check exists empty btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + Assert.assertEquals(new Long(0), attr.getCount()); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNotNull(map); + Assert.assertTrue(map.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testRangedGetWithtDeleteAndWithDropWithFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "flag".getBytes()); + filter.setBitOperand(BitWiseOperands.AND, "flag".getBytes()); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals(VALUE, + mc.asyncBopGet(KEY, BKEY, BKEY, filter, 0, 1, true, true) + 
.get().get(BKEY).getValue()); + + // check btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(attr); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testRangedGetWithtDeleteAndWithDeleteWithFilter() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "flag".getBytes()); + + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals(VALUE, + mc.asyncBopGet(KEY, BKEY, BKEY, filter, 0, 1, true, false) + .get().get(BKEY).getValue()); + + // check btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNotNull(map); + Assert.assertTrue(map.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyBTreeTest.java b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyBTreeTest.java new file mode 100644 index 000000000..6eedb35b4 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyBTreeTest.java @@ -0,0 +1,87 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.CollectionOverflowAction; + +public class CreateEmptyBTreeTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testCreateEmptyWithDefaultAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + Boolean insertResult = mc.asyncBopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(4000), attr.getMaxCount()); + Assert.assertEquals(new Integer(0), attr.getExpireTime()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testCreateEmptyWithSpecifiedAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + attribute.setMaxCount(10000); + attribute.setExpireTime(9999); + 
attribute.setOverflowAction(CollectionOverflowAction.error); + Boolean insertResult = mc.asyncBopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(10000), attr.getMaxCount()); + Assert.assertEquals(CollectionOverflowAction.error, + attr.getOverflowAction()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyListTest.java b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyListTest.java new file mode 100644 index 000000000..0eda6609d --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptyListTest.java @@ -0,0 +1,88 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.CollectionOverflowAction; + +public class CreateEmptyListTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testCreateEmptyWithDefaultAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + Boolean insertResult = mc.asyncLopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(4000), attr.getMaxCount()); + Assert.assertEquals(new Integer(0), attr.getExpireTime()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testCreateEmptyWithSpecifiedAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + attribute.setMaxCount(10000); + attribute.setExpireTime(9999); + attribute.setOverflowAction(CollectionOverflowAction.error); + Boolean insertResult = mc.asyncLopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(10000), attr.getMaxCount()); + Assert.assertEquals(CollectionOverflowAction.error, + 
attr.getOverflowAction()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/CreateEmptySetTest.java b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptySetTest.java new file mode 100644 index 000000000..205332855 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/CreateEmptySetTest.java @@ -0,0 +1,88 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.collection.CollectionOverflowAction; + +public class CreateEmptySetTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testCreateEmptyWithDefaultAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + Boolean insertResult = mc.asyncSopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(4000), attr.getMaxCount()); + Assert.assertEquals(new Integer(0), attr.getExpireTime()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testCreateEmptyWithSpecifiedAttribute() { + try { + // create empty + CollectionAttributes attribute = new CollectionAttributes(); + attribute.setMaxCount(10000); + attribute.setExpireTime(9999); + attribute.setOverflowAction(CollectionOverflowAction.error); + Boolean insertResult = mc.asyncSopCreate(KEY, + ElementValueType.OTHERS, attribute).get(); + + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + + Assert.assertEquals(new Long(0), attr.getCount()); + Assert.assertEquals(new Long(10000), attr.getMaxCount()); + Assert.assertEquals(CollectionOverflowAction.error, + 
attr.getOverflowAction()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTest.java b/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTest.java new file mode 100644 index 000000000..acf00eb6c --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTest.java @@ -0,0 +1,246 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementValueType; +import net.spy.memcached.internal.CollectionFuture; + +public class GetCountBTreeTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testGetBKeyCountFromInvalidKey() { + try { + CollectionFuture future = mc.asyncBopGetItemCount( + "INVALIDKEY", BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.NOT_FOUND, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromInvalidType() { + try { + // insert value into set + Boolean insertResult = mc.asyncSopInsert(KEY, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // get count from key + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.TYPE_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public 
void testGetBKeyCountUnreadable() { + try { + CollectionAttributes attributes = new CollectionAttributes(); + attributes.setReadable(false); + + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.STRING, attributes).get(); + Assert.assertTrue(createResult); + + // get count from key + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.UNREADABLE, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromInvalidBKeyType() { + try { + // insert an item + Boolean insertResult = mc.asyncBopInsert(KEY, new byte[] { 0 }, + null, "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // get count from key + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.BKEY_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture 
future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + Assert.assertEquals(CollectionResponse.END, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty2() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY + 1, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(2), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountByNotExistsBKey() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY + 3, BKEY + 3, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + 
Assert.assertNotNull(count); + Assert.assertEquals(new Integer(0), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountByNotExistsRange() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY + 2, BKEY + 3, ElementFlagFilter.DO_NOT_FILTER); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(0), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTestWithElementFlagFilter.java b/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTestWithElementFlagFilter.java new file mode 100644 index 000000000..ba3e60d78 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/GetCountBTreeTestWithElementFlagFilter.java @@ -0,0 +1,255 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.CollectionResponse; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; +import net.spy.memcached.internal.CollectionFuture; + +public class GetCountBTreeTestWithElementFlagFilter extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testGetBKeyCountFromInvalidKey() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.GreaterThan, "1".getBytes()); + + CollectionFuture future = mc.asyncBopGetItemCount( + "INVALIDKEY", BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.NOT_FOUND, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromInvalidType() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // insert value into set + Boolean insertResult = mc.asyncSopInsert(KEY, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + // get count from key + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + 
Assert.assertNull(count); + Assert.assertFalse(future.getOperationStatus().isSuccess()); + Assert.assertEquals(CollectionResponse.TYPE_MISMATCH, future + .getOperationStatus().getResponse()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty2() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = 
mc.asyncBopGetItemCount(KEY, + BKEY, BKEY + 1, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(0), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountFromNotEmpty3() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + "eflag".getBytes(), "value", new CollectionAttributes()) + .get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, + "eflageflag".getBytes(), "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY, BKEY, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(1), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountByNotExistsBKey() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY + 3, 
BKEY + 3, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(0), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetBKeyCountByNotExistsRange() { + try { + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, "eflag".getBytes()); + + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert two items + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, "value", + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + + Boolean insertResult2 = mc.asyncBopInsert(KEY, BKEY + 1, null, + "value", new CollectionAttributes()).get(); + Assert.assertTrue(insertResult2); + + // check count in attributes + Assert.assertEquals(new Long(2), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get btree item count + CollectionFuture future = mc.asyncBopGetItemCount(KEY, + BKEY + 2, BKEY + 3, filter); + Integer count = future.get(); + Assert.assertNotNull(count); + Assert.assertEquals(new Integer(0), count); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/GetWithDropBTreeTest.java b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropBTreeTest.java new file mode 100644 index 000000000..ff0127a7f --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropBTreeTest.java @@ -0,0 +1,121 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class GetWithDropBTreeTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10L; + private final int VALUE = 1234567890; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + + boolean insertResult = mc.asyncBopInsert(KEY, BKEY, null, VALUE, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + } + + public void testGetWithoutDeleteAndDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=false, drop=true + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + + // check exists + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value again + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + false, false).get().get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithtDeleteAndWithoutDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), 
mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + true, false).get().get(BKEY).getValue()); + + // check exists empty btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + Assert.assertEquals(new Long(0), attr.getCount()); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNotNull(map); + Assert.assertTrue(map.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithtDeleteAndWithDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals( + VALUE, + mc.asyncBopGet(KEY, BKEY, ElementFlagFilter.DO_NOT_FILTER, + true, true).get().get(BKEY).getValue()); + + // check btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(attr); + + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/GetWithDropListTest.java b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropListTest.java new file mode 100644 index 000000000..54b0f2676 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropListTest.java @@ -0,0 +1,113 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.util.List; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.internal.CollectionFuture; + +public class GetWithDropListTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int INDEX = 0; + private final int VALUE = 1234567890; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + + boolean insertResult = mc.asyncLopInsert(KEY, INDEX, VALUE, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + } + + public void testGetWithoutDeleteAndDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=false, drop=true + Assert.assertEquals(VALUE, mc.asyncLopGet(KEY, INDEX, false, false) + .get().get(INDEX)); + + // check exists + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value againg + Assert.assertEquals(VALUE, mc.asyncLopGet(KEY, INDEX, false, false) + .get().get(INDEX)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithtDeleteAndWithoutDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals(VALUE, mc.asyncLopGet(KEY, INDEX, true, false) + 
.get().get(INDEX)); + + // check exists empty btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + Assert.assertEquals(new Long(0), attr.getCount()); + + // get value again + CollectionFuture> asyncLopGet = mc.asyncLopGet(KEY, + INDEX, false, false); + List list = asyncLopGet.get(); + Assert.assertNotNull(list); + Assert.assertTrue(list.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithtDeleteAndWithDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertEquals(VALUE, mc.asyncLopGet(KEY, INDEX, true, true) + .get().get(INDEX)); + + // check btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(attr); + + List list = mc.asyncLopGet(KEY, INDEX, false, false).get(); + Assert.assertNull(list); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/GetWithDropSetTest.java b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropSetTest.java new file mode 100644 index 000000000..d72a79378 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/GetWithDropSetTest.java @@ -0,0 +1,108 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.util.Set; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class GetWithDropSetTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int VALUE = 1234567890; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + + boolean insertResult = mc.asyncSopInsert(KEY, VALUE, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult); + } + + public void testGetWithoutDeleteAndDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=false, drop=true + Assert.assertTrue(mc.asyncSopGet(KEY, 10, false, false).get() + .contains(VALUE)); + + // check exists + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value again + Assert.assertTrue(mc.asyncSopGet(KEY, 10, false, false).get() + .contains(VALUE)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testGetWithtDeleteAndWithoutDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=false + Assert.assertTrue(mc.asyncSopGet(KEY, 10, true, false).get() + .contains(VALUE)); + + // check exists empty btree + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNotNull(attr); + Assert.assertEquals(new Long(0), attr.getCount()); + + Set set = mc.asyncSopGet(KEY, 10, false, false).get(); + Assert.assertNotNull(set); + Assert.assertTrue(set.isEmpty()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void 
testGetWithtDeleteAndWithDrop() { + try { + // check attr + Assert.assertEquals(new Long(1), mc.asyncGetAttr(KEY).get() + .getCount()); + + // get value delete=true, drop=true + Assert.assertTrue(mc.asyncSopGet(KEY, 10, true, true).get() + .contains(VALUE)); + + // check set is dropped + CollectionAttributes attr = mc.asyncGetAttr(KEY).get(); + Assert.assertNull(attr); + + Set set = mc.asyncSopGet(KEY, 10, false, false).get(); + Assert.assertNull(set); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/InsertBTreeWithAttrAndEFlagTest.java b/src/test/manual/net/spy/memcached/emptycollection/InsertBTreeWithAttrAndEFlagTest.java new file mode 100644 index 000000000..d5959c7db --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/InsertBTreeWithAttrAndEFlagTest.java @@ -0,0 +1,154 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class InsertBTreeWithAttrAndEFlagTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10; + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttributeAndWithoutFilter() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, "value", attr).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithDefaultAttributeAndWithoutFilter() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + 
ElementFlagFilter.EMPTY_ELEMENT_FLAG, "value", attr).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithAttributeAndFilter() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + byte[] filter = "0001".getBytes(); + + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, filter, + "value", attr).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithAttributeAndInvalidFilter() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + byte[] filter = "1234567890123456789012345678901234567890" + .getBytes(); + + try { + mc.asyncBopInsert(KEY, BKEY, filter, "value", attr).get(); + } catch (IllegalArgumentException e) { + return; + } + Assert.fail("Something's going wrong."); + } catch (Exception e) { + Assert.fail(e.getMessage()); + } + Assert.fail("Something's going wrong."); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/InsertListWithAttrTest.java 
b/src/test/manual/net/spy/memcached/emptycollection/InsertListWithAttrTest.java new file mode 100644 index 000000000..2a987a618 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/InsertListWithAttrTest.java @@ -0,0 +1,96 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.util.List; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class InsertListWithAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int INDEX = 0; + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + Boolean insertResult = mc.asyncLopInsert(KEY, INDEX, "value", attr) + .get(); + Assert.assertTrue(insertResult); + + // check 
attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + List list = mc.asyncLopGet(KEY, INDEX, false, false).get(); + Assert.assertNull(list); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithDefaultAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + Boolean insertResult = mc.asyncLopInsert(KEY, INDEX, "value", attr) + .get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/InsertSetWithAttrTest.java b/src/test/manual/net/spy/memcached/emptycollection/InsertSetWithAttrTest.java new file mode 100644 index 000000000..20658a382 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/InsertSetWithAttrTest.java @@ -0,0 +1,93 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import java.util.Set; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; + +public class InsertSetWithAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + Boolean insertResult = mc.asyncSopInsert(KEY, "value", attr).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + Set set = mc.asyncSopGet(KEY, 10, false, false).get(); + Assert.assertNull(set); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithDefaultAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + Boolean insertResult = mc.asyncSopInsert(KEY, "value", attr).get(); + Assert.assertTrue(insertResult); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + } catch (Exception e) { 
+ e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertBTreeWithAttrTest.java b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertBTreeWithAttrTest.java new file mode 100644 index 000000000..b717378cc --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertBTreeWithAttrTest.java @@ -0,0 +1,213 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.ByteArrayBKey; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.ElementFlagFilter.CompOperands; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class PipedBulkInsertBTreeWithAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10; + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + Map elements = new HashMap(); + for (long i = 1; i < 11; i++) + elements.put(i, 1); + Map insertResult = mc + .asyncBopPipedInsertBulk(KEY, elements, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertNull(map); + } catch (Exception e) { + e.printStackTrace(); + 
Assert.fail(e.getMessage()); + } + } + + public void testInsertWithDefaultAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + Map elements = new HashMap(); + for (long i = 1; i < 11; i++) + elements.put(i, 1); + Map insertResult = mc + .asyncBopPipedInsertBulk(KEY, elements, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + Map elements = new HashMap(); + for (long i = 1; i < 11; i++) + elements.put(i, 1); + Map insertResult = mc + .asyncBopPipedInsertBulk(KEY, elements, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeDoNotCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + Map elements = new HashMap(); + for (long i = 1; i < 11; i++) + elements.put(i, 1); + Map insertResult = mc + .asyncBopPipedInsertBulk(KEY, elements, null).get(); + Assert.assertEquals(10, insertResult.size()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + assertNull(collectionAttributes); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithEflag() { + try { + 
byte[] eflag = new byte[] { 0, 1, 0, 1 }; + List> elements = new ArrayList>(); + for (int i = 0; i < 10; i++) + elements.add(new Element(new byte[] { (byte) i }, + "VALUE" + i, eflag)); + + Map map = mc + .asyncBopPipedInsertBulk(KEY, elements, + new CollectionAttributes()).get(); + + Assert.assertTrue(map.isEmpty()); + + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, eflag); + + Map> map2 = mc.asyncBopGet(KEY, + new byte[] { (byte) 0 }, new byte[] { (byte) 9 }, filter, + 0, 100, false, false).get(); + + Assert.assertEquals(10, map2.size()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithEflagLongBkey() { + try { + byte[] eflag = new byte[] { 0, 1, 0, 1 }; + + List> elements = new ArrayList>(); + for (int i = 0; i < 10; i++) + elements.add(new Element(i, "VALUE" + i, eflag)); + + Map map = mc + .asyncBopPipedInsertBulk(KEY, elements, + new CollectionAttributes()).get(); + + Assert.assertTrue(map.isEmpty()); + + ElementFlagFilter filter = new ElementFlagFilter( + CompOperands.Equal, eflag); + + Map> map2 = mc.asyncBopGet(KEY, 0, 10, + filter, 0, 100, false, false).get(); + + Assert.assertEquals(10, map2.size()); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertListWithAttrTest.java b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertListWithAttrTest.java new file mode 100644 index 000000000..f71ad2719 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertListWithAttrTest.java @@ -0,0 +1,176 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class PipedBulkInsertListWithAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int INDEX = 0; + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // 
check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + List list = mc.asyncLopGet(KEY, 0, false, false).get(); + Assert.assertNull(list); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithDefaultAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // 
check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeDoNotCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, null).get(); + Assert.assertEquals(10, insertResult.size()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + assertNull(collectionAttributes); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertSetWithAttrTest.java b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertSetWithAttrTest.java new file mode 100644 index 000000000..25998e283 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/PipedBulkInsertSetWithAttrTest.java @@ -0,0 +1,176 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.ops.CollectionOperationStatus; + +public class PipedBulkInsertSetWithAttrTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final int INDEX = 0; + private final int EXPIRE_TIME_IN_SEC = 1; + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testInsertWithAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + attr.setExpireTime(EXPIRE_TIME_IN_SEC); + attr.setMaxCount(3333); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(3333), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + + // check expire time + Thread.sleep(EXPIRE_TIME_IN_SEC * 1000L); + List list = mc.asyncLopGet(KEY, 0, false, false).get(); + Assert.assertNull(list); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void 
testInsertWithDefaultAttribute() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + // insert with create option + CollectionAttributes attr = new CollectionAttributes(); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, attr).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + List valueList = new ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, + new CollectionAttributes()).get(); + Assert.assertTrue(insertResult.isEmpty()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + Assert.assertEquals(new Long(4000), + collectionAttributes.getMaxCount()); + Assert.assertEquals(new Long(10), collectionAttributes.getCount()); + + // check values + List list2 = mc.asyncLopGet(KEY, 0, 10, false, false).get(); + for (int i = 0; i < list2.size(); i++) { + Assert.assertEquals(10 - i, list2.get(i)); + } + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInsertWithoutAttributeDoNotCreate() { + try { + // check not exists + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + + List valueList = new 
ArrayList(); + for (int i = 1; i < 11; i++) + valueList.add(i); + + Map insertResult = mc + .asyncLopPipedInsertBulk(KEY, INDEX, valueList, null).get(); + assertEquals(10, insertResult.size()); + + // check attribute + CollectionAttributes collectionAttributes = mc.asyncGetAttr(KEY) + .get(); + assertNull(collectionAttributes); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeDeleteTest.java b/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeDeleteTest.java new file mode 100644 index 000000000..585a2255c --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeDeleteTest.java @@ -0,0 +1,57 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BTreeDelete; + +public class ProtocolBTreeDeleteTest extends TestCase { + + public void testStringfy() { + // default setting : dropIfEmpty = true + + Assert.assertEquals("10 drop", + (new BTreeDelete(10, false)).stringify()); + + Assert.assertEquals("10", (new BTreeDelete(10, false, false, + ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10 drop", (new BTreeDelete(10, false, + true, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + + Assert.assertEquals("10..20 1", (new BTreeDelete(10, 20, 1, + false, false, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10..20 1 drop", (new BTreeDelete(10, 20, + 1, false, true, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + + Assert.assertEquals("10 drop noreply", (new BTreeDelete(10, + true)).stringify()); + + Assert.assertEquals("10 noreply", (new BTreeDelete(10, true, + false, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10 drop noreply", (new BTreeDelete(10, + true, true, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + + Assert.assertEquals("10..20 1 noreply", (new BTreeDelete(10, + 20, 1, true, false, ElementFlagFilter.DO_NOT_FILTER)) + .stringify()); + Assert.assertEquals("10..20 1 drop noreply", (new BTreeDelete( + 10, 20, 1, true, true, ElementFlagFilter.DO_NOT_FILTER)) + .stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeGetTest.java b/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeGetTest.java new file mode 100644 index 000000000..d93d1d9b6 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/ProtocolBTreeGetTest.java @@ -0,0 +1,60 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BTreeGet; + +public class ProtocolBTreeGetTest extends TestCase { + + private static final long bkey = 10; + + public void testStringfy() { + // default setting : dropIfEmpty = true + + Assert.assertEquals("10 drop", + (new BTreeGet(bkey, true)).stringify()); + Assert.assertEquals("10", + (new BTreeGet(bkey, false)).stringify()); + + Assert.assertEquals("10 drop", (new BTreeGet(bkey, true, true, + ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10 delete", (new BTreeGet(bkey, true, + false, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10", (new BTreeGet(bkey, false, true, + ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10", (new BTreeGet(bkey, false, false, + ElementFlagFilter.DO_NOT_FILTER)).stringify()); + + Assert.assertEquals("10..20 1 1 drop", (new BTreeGet(10, 20, 1, + 1, true)).stringify()); + Assert.assertEquals("10..20 1 1", (new BTreeGet(10, 20, 1, 1, + false)).stringify()); + + Assert.assertEquals("10..20 1 1 delete", (new BTreeGet(10, 20, + 1, 1, true, false, ElementFlagFilter.DO_NOT_FILTER)) + .stringify()); + Assert.assertEquals("10..20 1 1 drop", (new BTreeGet(10, 20, 1, + 1, true, true, 
ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10..20 1 1", (new BTreeGet(10, 20, 1, 1, + false, true, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + Assert.assertEquals("10..20 1 1", (new BTreeGet(10, 20, 1, 1, + false, false, ElementFlagFilter.DO_NOT_FILTER)).stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolListDeleteTest.java b/src/test/manual/net/spy/memcached/emptycollection/ProtocolListDeleteTest.java new file mode 100644 index 000000000..b2ef13bc5 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/ProtocolListDeleteTest.java @@ -0,0 +1,54 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.ListDelete; + +public class ProtocolListDeleteTest extends TestCase { + + public void testStringfy() { + // default setting : dropIfEmpty = true + + Assert.assertEquals("10 drop", + (new ListDelete(10, false)).stringify()); + + Assert.assertEquals("10", + (new ListDelete(10, false, false)).stringify()); + Assert.assertEquals("10 drop", + (new ListDelete(10, false, true)).stringify()); + + Assert.assertEquals("10..20", (new ListDelete(10, 20, false, + false)).stringify()); + Assert.assertEquals("10..20 drop", (new ListDelete(10, 20, + false, true)).stringify()); + + Assert.assertEquals("10 drop noreply", + (new ListDelete(10, true)).stringify()); + + Assert.assertEquals("10 noreply", (new ListDelete(10, true, + false)).stringify()); + Assert.assertEquals("10 drop noreply", (new ListDelete(10, + true, true)).stringify()); + + Assert.assertEquals("10..20 noreply", (new ListDelete(10, 20, + true, false)).stringify()); + Assert.assertEquals("10..20 drop noreply", (new ListDelete(10, + 20, true, true)).stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolListGetTest.java b/src/test/manual/net/spy/memcached/emptycollection/ProtocolListGetTest.java new file mode 100644 index 000000000..2400b2a27 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/ProtocolListGetTest.java @@ -0,0 +1,57 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.ListGet; + +public class ProtocolListGetTest extends TestCase { + + private static final int index = 10; + + public void testStringfy() { + // default setting : dropIfEmpty = true + Assert.assertEquals("10 drop", + (new ListGet(index, true)).stringify()); + Assert.assertEquals("10", + (new ListGet(index, false)).stringify()); + + Assert.assertEquals("10 drop", + (new ListGet(index, true, true)).stringify()); + Assert.assertEquals("10 delete", (new ListGet(index, true, + false)).stringify()); + Assert.assertEquals("10", + (new ListGet(index, false, true)).stringify()); + Assert.assertEquals("10", + (new ListGet(index, false, false)).stringify()); + + Assert.assertEquals("10..20 drop", + (new ListGet(10, 20, true)).stringify()); + Assert.assertEquals("10..20", + (new ListGet(10, 20, false)).stringify()); + + Assert.assertEquals("10..20 delete", (new ListGet(10, 20, true, + false)).stringify()); + Assert.assertEquals("10..20 drop", (new ListGet(10, 20, true, + true)).stringify()); + Assert.assertEquals("10..20", + (new ListGet(10, 20, false, true)).stringify()); + Assert.assertEquals("10..20", + (new ListGet(10, 20, false, false)).stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetDeleteTest.java b/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetDeleteTest.java new file mode 100644 index 000000000..2ee97d9b8 --- /dev/null +++ 
b/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetDeleteTest.java @@ -0,0 +1,52 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.SetDelete; + +public class ProtocolSetDeleteTest extends TestCase { + + public void testStringfy() { + // default setting : dropIfEmpty = true + + SetDelete del = new SetDelete("value", false); + del.setData("value".getBytes()); + Assert.assertEquals("5 drop", del.stringify()); + + del = new SetDelete("value", false, false); + del.setData("value".getBytes()); + Assert.assertEquals("5", del.stringify()); + + del = new SetDelete("value", false, true); + del.setData("value".getBytes()); + Assert.assertEquals("5 drop", del.stringify()); + + del = new SetDelete("value", true); + del.setData("value".getBytes()); + Assert.assertEquals("5 drop noreply", del.stringify()); + + del = new SetDelete("value", true, false); + del.setData("value".getBytes()); + Assert.assertEquals("5 noreply", del.stringify()); + + del = new SetDelete("value", true, true); + del.setData("value".getBytes()); + Assert.assertEquals("5 drop noreply", del.stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetGetTest.java 
b/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetGetTest.java new file mode 100644 index 000000000..7037732c0 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/ProtocolSetGetTest.java @@ -0,0 +1,44 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import junit.framework.Assert; +import junit.framework.TestCase; +import net.spy.memcached.collection.SetGet; + +public class ProtocolSetGetTest extends TestCase { + + private static final int count = 10; + + public void testStringfy() { + // default setting : dropIfEmpty = true + + Assert.assertEquals("10 drop", + (new SetGet(count, true)).stringify()); + Assert.assertEquals("10", + (new SetGet(count, false)).stringify()); + + Assert.assertEquals("10 drop", + (new SetGet(count, true, true)).stringify()); + Assert.assertEquals("10 delete", + (new SetGet(count, true, false)).stringify()); + Assert.assertEquals("10", + (new SetGet(count, false, true)).stringify()); + Assert.assertEquals("10", + (new SetGet(count, false, false)).stringify()); + } +} diff --git a/src/test/manual/net/spy/memcached/emptycollection/VariousTypeTest.java b/src/test/manual/net/spy/memcached/emptycollection/VariousTypeTest.java new file mode 100644 index 000000000..1d96e26a3 --- /dev/null +++ b/src/test/manual/net/spy/memcached/emptycollection/VariousTypeTest.java @@ -0,0 
+1,363 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.emptycollection; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; +import java.util.Map; + +import junit.framework.Assert; +import net.spy.memcached.collection.Element; +import net.spy.memcached.collection.ElementFlagFilter; +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.collection.CollectionAttributes; +import net.spy.memcached.collection.ElementValueType; + +public class VariousTypeTest extends BaseIntegrationTest { + + private final String KEY = this.getClass().getSimpleName(); + private final long BKEY = 10; + private final CollectionAttributes ATTR = new CollectionAttributes(); + + @Override + protected void setUp() throws Exception { + super.setUp(); + mc.delete(KEY).get(); + Assert.assertNull(mc.asyncGetAttr(KEY).get()); + } + + @Override + protected void tearDown() throws Exception { + mc.delete(KEY).get(); + super.tearDown(); + } + + public void testString() { + try { + String value = "VALUE"; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.STRING, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, 
ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testLong() { + try { + long value = 1234567890L; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.LONG, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testInteger() { + try { + int value = 1234567890; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.INTEGER, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testBoolean() { + try { + boolean value = false; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.BOOLEAN, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, 
BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDate() { + try { + Date value = new Date(); + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.DATE, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testByte() { + try { + byte value = 0x00; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.BYTE, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testFloat() { + try { + float value = 1234567890; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.FLOAT, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, 
map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDouble() { + try { + double value = 1234567890; + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.DOUBLE, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertEquals(value, map.get(BKEY).getValue()); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testByteArray() { + try { + byte[] value = "value".getBytes(); + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.BYTEARRAY, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + Assert.assertTrue(Arrays.equals(value, (byte[]) map.get(BKEY) + .getValue())); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testOtherObjects() { + try { + UserDefinedClass value = new UserDefinedClass(); + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.OTHERS, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + 
Assert.assertTrue(value.equals(map.get(BKEY).getValue())); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testList() { + try { + List value = new ArrayList(); + value.add("Hello"); + value.add("Netspider"); + + // create empty + Boolean createResult = mc.asyncBopCreate(KEY, + ElementValueType.OTHERS, ATTR).get(); + Assert.assertTrue(createResult); + + // insert value + Boolean insertResult = mc.asyncBopInsert(KEY, BKEY, + ElementFlagFilter.EMPTY_ELEMENT_FLAG, value, ATTR).get(); + Assert.assertTrue(insertResult); + + // get value + Map> map = mc.asyncBopGet(KEY, BKEY, + ElementFlagFilter.DO_NOT_FILTER, false, false).get(); + + @SuppressWarnings("unchecked") + List r = (List) map.get(BKEY).getValue(); + + Assert.assertEquals(2, r.size()); + Assert.assertEquals("Hello", r.get(0)); + Assert.assertEquals("Netspider", r.get(1)); + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + private static final class UserDefinedClass implements Serializable { + private static final long serialVersionUID = 8942558579188233740L; + + public int i; + public List list; + + public UserDefinedClass() { + this.i = 100; + this.list = new ArrayList(); + this.list.add("Hello"); + this.list.add("Netspider"); + } + + public boolean equals(Object o) { + if (!(o instanceof UserDefinedClass)) + return false; + + UserDefinedClass c = (UserDefinedClass) o; + + boolean eq = this.i == c.i; + + if (this.list == null && c.list == null) + return eq; + + eq &= this.list.size() == c.list.size(); + + if (!eq) + return eq; + + for (int i = 0; i < this.list.size(); i++) { + eq &= this.list.get(i).equals(c.list.get(i)); + if (!eq) + return eq; + } + + return eq; + } + } +} diff --git a/src/test/manual/net/spy/memcached/flushbyprefix/FlushByPrefixTest.java b/src/test/manual/net/spy/memcached/flushbyprefix/FlushByPrefixTest.java new file mode 100644 index 000000000..9ae4a2732 --- /dev/null +++ 
b/src/test/manual/net/spy/memcached/flushbyprefix/FlushByPrefixTest.java @@ -0,0 +1,139 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.flushbyprefix; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import net.spy.memcached.collection.BaseIntegrationTest; +import net.spy.memcached.internal.OperationFuture; + +public class FlushByPrefixTest extends BaseIntegrationTest { + + private final String PREFIX = "prefix"; + private final String DELIMITER = ":"; + private final String KEY = this.getClass().getSimpleName(); + private final String VALUE = "value"; + + public void testFlushByPrefix() { + try { + Boolean setResult = mc.set(PREFIX + DELIMITER + KEY, 60, VALUE) + .get(); + assertTrue(setResult); + Object object = mc.asyncGet(PREFIX + DELIMITER + KEY).get(); + assertEquals(VALUE, object); + + Boolean flushResult = mc.flush("prefix").get(Long.MAX_VALUE, TimeUnit.MILLISECONDS); + assertTrue(flushResult); + + Object object2 = mc.asyncGet(PREFIX + DELIMITER + KEY).get(); + assertNull(object2); + } catch (Exception e) { + fail(e.getMessage()); + e.printStackTrace(); + } + } + + public void testFlushByPrefix1Depth() { + try { + for (int i = 0; i < 10; i++) { + Boolean setResult = mc.set(PREFIX + DELIMITER + KEY + i, 60, + VALUE).get(); + assertTrue(setResult); + Object object = mc.asyncGet(PREFIX + 
DELIMITER + KEY + i).get(); + assertEquals(VALUE, object); + } + + Boolean flushResult = mc.flush("prefix").get(); + assertTrue(flushResult); + + for (int i = 0; i < 10; i++) { + Object object2 = mc.asyncGet(PREFIX + DELIMITER + KEY + i) + .get(); + assertNull(object2); + } + } catch (Exception e) { + fail(e.getMessage()); + e.printStackTrace(); + } + } + + public void testFlushByMultiPrefix() { + try { + for (int i = 0; i < 10; i++) { + for (int prefix2 = 0; prefix2 < 10; prefix2++) { + Boolean setResult = mc.set( + PREFIX + DELIMITER + prefix2 + DELIMITER + KEY + i, + 60, VALUE).get(); + assertTrue(setResult); + Object object = mc.asyncGet( + PREFIX + DELIMITER + prefix2 + DELIMITER + KEY + i) + .get(); + assertEquals(VALUE, object); + } + } + + Boolean flushResult = mc.flush("prefix").get(); + assertTrue(flushResult); + + for (int i = 0; i < 10; i++) { + for (int prefix2 = 0; prefix2 < 10; prefix2++) { + Object object2 = mc.asyncGet( + PREFIX + DELIMITER + prefix2 + DELIMITER + KEY + i) + .get(); + assertNull(object2); + } + } + } catch (Exception e) { + fail(e.getMessage()); + e.printStackTrace(); + } + } + + public void testTimeout() { + OperationFuture flushFuture = null; + try { + for (int i = 0; i < 100; i++) { + for (int prefix2 = 0; prefix2 < 10; prefix2++) { + Boolean setResult = mc.set( + PREFIX + DELIMITER + prefix2 + DELIMITER + KEY + i, + 60, VALUE).get(); + assertTrue(setResult); + Object object = mc.asyncGet( + PREFIX + DELIMITER + prefix2 + DELIMITER + KEY + i) + .get(); + assertEquals(VALUE, object); + } + } + + flushFuture = mc.flush("prefix"); + + Boolean flushResult = flushFuture.get(1L, TimeUnit.NANOSECONDS); + + assertTrue(flushResult); + + fail("There's no timeout."); + } catch (TimeoutException e) { + if (flushFuture != null) + flushFuture.cancel(true); + } catch (Exception e) { + fail(e.getMessage()); + e.printStackTrace(); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/frontcache/LocalCacheManagerTest.java 
b/src/test/manual/net/spy/memcached/frontcache/LocalCacheManagerTest.java new file mode 100644 index 000000000..62ed4e5e3 --- /dev/null +++ b/src/test/manual/net/spy/memcached/frontcache/LocalCacheManagerTest.java @@ -0,0 +1,228 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.frontcache; + +import java.util.Map; +import java.util.concurrent.Future; + +import org.junit.Ignore; + +import junit.framework.TestCase; +import net.spy.memcached.ArcusClient; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.internal.BulkFuture; +import net.spy.memcached.plugin.LocalCacheManager; +import net.spy.memcached.transcoders.Transcoder; + +@Ignore +public class LocalCacheManagerTest extends TestCase { + + private ArcusClient client; + + // put keys + String[] keys = { "key0", "key1", "key2", "key3", "key4", "key5", "key6", + "key7", "key8", "key9" }; + + protected void setUp() throws Exception { + ConnectionFactoryBuilder cfb = new ConnectionFactoryBuilder(); + cfb.setFrontCacheExpireTime(5); + cfb.setMaxFrontCacheElements(10); + client = ArcusClient.createArcusClient("127.0.0.1:2181", "test", cfb); + } + + protected void tearDown() throws Exception { + client.shutdown(); + } + + public void testGet() throws Exception { + for (String k : keys) { + client.set(k, 2, k + "_value").get(); + } + + Future f = client.asyncGet(keys[0]); + 
Object result = f.get(); + + Transcoder tc = null; + Object cached = client.getLocalCacheManager().get(keys[0], tc); + + assertNotNull(result); + assertNotNull(cached); + assertSame("not the same result", result, cached); + + // after 3 seconds : remote expired, locally cached + Thread.sleep(3000); + + f = client.asyncGet(keys[0]); + result = f.get(); + + cached = client.getLocalCacheManager().get(keys[0], tc); + + assertNotNull(result); + assertNotNull(cached); + assertEquals("not the same result", result, cached); + + // after another 3 seconds : both remote and local expired + Thread.sleep(3000); + + f = client.asyncGet(keys[0]); + result = f.get(); + + cached = client.getLocalCacheManager().get(keys[0], tc); + + assertNull(result); + assertNull(cached); + } + + public void testGetBulk() throws Exception { + for (String k : keys) { + client.set(k, 2, k + "_value").get(); + } + + // read-through. + Map result = client.getBulk(keys); + + // expecting that the keys are locally cached. + LocalCacheManager lcm = client.getLocalCacheManager(); + for (String k : keys) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNotNull(got); + } + + // after 3 seconds, all keys should be expired. + Thread.sleep(3000); + + // but we have locally cached results. + result = client.getBulk(keys); + assertNotNull(result); + assertTrue(keys.length == result.size()); + + // then after additional 3 seconds, locally cached results should be + // expired. + Thread.sleep(3000); + + for (String k : keys) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNull(got); + } + + result = client.getBulk(keys); + assertNotNull(result); + assertTrue(0 == result.size()); + } + + public void testAsyncGetBulk() throws Exception { + for (String k : keys) { + client.set(k, 2, k + "_value").get(); + } + + // read-through. + BulkFuture> f = client.asyncGetBulk(keys); + Map result = f.get(); + + // expecting that the keys are locally cached. 
+ LocalCacheManager lcm = client.getLocalCacheManager(); + for (String k : keys) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNotNull(got); + } + + // after 3 seconds, all keys should be expired. + Thread.sleep(3000); + + // but we have locally cached results. + f = client.asyncGetBulk(keys); + result = f.get(); + assertNotNull(result); + assertTrue(keys.length == result.size()); + + // then after additional 3 seconds, locally cached results should be + // expired. + Thread.sleep(3000); + + for (String k : keys) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNull(got); + } + + f = client.asyncGetBulk(keys); + result = f.get(); + assertNotNull(result); + assertTrue(0 == result.size()); + } + + public void testBulkPartial() throws Exception { + String keySet1[] = new String[keys.length / 2]; + String keySet2[] = new String[keys.length / 2]; + + for (int i = 0; i < keys.length / 2; i++) { + keySet1[i] = keys[i * 2]; + keySet2[i] = keys[i * 2 + 1]; + } + + // Set 1 + for (String k : keySet1) { + client.set(k, 2, k + "_value").get(); + } + + // read-through. + BulkFuture> f = client.asyncGetBulk(keys); + Map result = f.get(); + + // expecting that the keys are locally cached. + LocalCacheManager lcm = client.getLocalCacheManager(); + for (String k : keySet1) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNotNull(got); + } + + // after 3 seconds, put another set of keys + Thread.sleep(3000); + + // Set 2 + for (String k : keySet2) { + client.set(k, 4, k + "_value").get(); + } + + // Set 1 : locally cached + // Set 2 : from the remote cache + f = client.asyncGetBulk(keys); + result = f.get(); + assertNotNull(result); + assertTrue(keys.length == result.size()); + + // then after additional 3 seconds, locally cached Set 1 should be + // expired. 
+ Thread.sleep(3000); + + for (String k : keySet1) { + Transcoder tc = null; + Object got = lcm.get(k, tc); + assertNull(got); + } + + f = client.asyncGetBulk(keys); + result = f.get(); + assertNotNull(result); + assertTrue(keySet2.length == result.size()); + } + +} diff --git a/src/test/manual/net/spy/memcached/test/AuthTest.java b/src/test/manual/net/spy/memcached/test/AuthTest.java new file mode 100644 index 000000000..b0b395795 --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/AuthTest.java @@ -0,0 +1,58 @@ +package net.spy.memcached.test; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.ConnectionFactoryBuilder.Protocol; +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.compat.SpyObject; + +/** + * Authentication functional test. + */ +public class AuthTest extends SpyObject implements Runnable { + + private final String username; + private final String password; + private MemcachedClient client; + + public AuthTest(String u, String p) { + username = u; + password = p; + } + + public void init() throws Exception { + client = new MemcachedClient(new ConnectionFactoryBuilder() + .setProtocol(Protocol.BINARY) + .setAuthDescriptor(AuthDescriptor.typical(username, password)) + .build(), AddrUtil.getAddresses("localhost:11212")); + } + + public void shutdown() throws Exception { + client.shutdown(); + } + + public void run() { + System.out.println("Available mechs: " + client.listSaslMechanisms()); + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + client.getVersions(); + } + + public static void main(String[] a) throws Exception { + AuthTest lt = new AuthTest("testuser", "testpass"); + lt.init(); + long start = System.currentTimeMillis(); + try { + lt.run(); + } finally { + lt.shutdown(); + } + long end = System.currentTimeMillis(); + System.out.println("Runtime: " + (end - start) 
+ "ms"); + } + +} diff --git a/src/test/manual/net/spy/memcached/test/ExcessivelyLargeGetTest.java b/src/test/manual/net/spy/memcached/test/ExcessivelyLargeGetTest.java new file mode 100644 index 000000000..64262b587 --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/ExcessivelyLargeGetTest.java @@ -0,0 +1,67 @@ +package net.spy.memcached.test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Map; +import java.util.Random; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.ConnectionFactoryBuilder.Protocol; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.util.CacheLoader; + +/** + * Test a multiget that's sufficiently large as to get data before the + * transision to read. + * + * Note that this is in manual tests currently because, while it predictably + * demonstrates the problems, I don't believe it generally demonstrates good + * behavior for a unit test. + */ +public class ExcessivelyLargeGetTest extends SpyObject implements Runnable { + + // How many keys to do + private static final int N = 25000; + + private final MemcachedClient client; + private final Collection keys; + private final byte[] value = new byte[4096]; + + public ExcessivelyLargeGetTest() throws Exception { + client = new MemcachedClient(new ConnectionFactoryBuilder() + .setProtocol(Protocol.BINARY).setOpTimeout(15000).build(), + AddrUtil.getAddresses("127.0.0.1:11211")); + keys = new ArrayList(N); + new Random().nextBytes(value); + } + + public void run() { + int nullKey = 0; + // Load up a bunch of data. + CacheLoader cl = new CacheLoader(client); + for (int i = 0; i < N; i++) { + String k = "multi." 
+ i; + keys.add(k); + cl.push(k, value); + } + + Map got = client.getBulk(keys); + for (String k : keys) { + if (got.containsKey(k)) { + assert Arrays.equals(value, (byte[]) got.get(k)) : "Incorrect result at " + + k; + } else { + nullKey++; + } + } + System.out.println("Fetched " + got.size() + "/" + keys.size() + " (" + + nullKey + " were null)"); + } + + public static void main(String[] args) throws Exception { + new ExcessivelyLargeGetTest().run(); + } +} diff --git a/src/test/manual/net/spy/memcached/test/LoaderTest.java b/src/test/manual/net/spy/memcached/test/LoaderTest.java new file mode 100644 index 000000000..0f53c4964 --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/LoaderTest.java @@ -0,0 +1,64 @@ +package net.spy.memcached.test; + +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.ConnectionFactoryBuilder.Protocol; +import net.spy.memcached.compat.SpyObject; +import net.spy.memcached.util.CacheLoader; + +/** + * Loader performance test. 
+ */ +public class LoaderTest extends SpyObject implements Runnable { + + private final int count; + private MemcachedClient client; + + public LoaderTest(int c) { + count = c; + } + + public void init() throws Exception { + client = new MemcachedClient(new ConnectionFactoryBuilder() + .setProtocol(Protocol.BINARY).setOpQueueMaxBlockTime(1000) + .build(), AddrUtil.getAddresses("localhost:11211")); + } + + public void shutdown() throws Exception { + client.shutdown(); + } + + public void run() { + CacheLoader cl = new CacheLoader(client); + + Future f = null; + for (int i = 0; i < count; i++) { + f = cl.push("k" + i, "some value"); + } + if (f != null) { + try { + f.get(1, TimeUnit.MINUTES); + } catch (Exception e) { + e.printStackTrace(); + } + } + } + + public static void main(String[] a) throws Exception { + LoaderTest lt = new LoaderTest(1000000); + lt.init(); + long start = System.currentTimeMillis(); + try { + lt.run(); + } finally { + lt.shutdown(); + } + long end = System.currentTimeMillis(); + System.out.println("Runtime: " + (end - start) + "ms"); + } + +} diff --git a/src/test/manual/net/spy/memcached/test/MemcachedThreadBench.java b/src/test/manual/net/spy/memcached/test/MemcachedThreadBench.java new file mode 100644 index 000000000..b6736eb53 --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/MemcachedThreadBench.java @@ -0,0 +1,199 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package net.spy.memcached.test; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.Ignore; + +import junit.framework.TestCase; +import net.spy.memcached.AddrUtil; +import net.spy.memcached.DefaultConnectionFactory; +import net.spy.memcached.MemcachedClient; + +/** + * Adaptation of http://code.google.com/p/spcached/wiki/benchmarktool + */ +@Ignore +public class MemcachedThreadBench extends TestCase { + + private static class WorkerStat { + public int start, runs; + + public long setterTime, getterTime; + + public WorkerStat() { + start = runs = 0; + setterTime = getterTime = 0; + } + } + + public void testCrap() throws Exception { + main(new String[] { "10000", "100", "11211", "100" }); + } + + public static void main(String[] args) throws Exception { + + if (args.length != 4) { + args = new String[] { "1000", "100", "11211", "100" }; + System.out.println("Usage: java " + + MemcachedThreadBench.class.getName() + + " "); + } + + int runs = Integer.parseInt(args[0]); + int start = Integer.parseInt(args[1]); + String serverlist = "127.0.0.1:" + args[2]; + int threads = Integer.parseInt(args[3]); + + MemcachedClient client = new MemcachedClient( + new DefaultConnectionFactory(100000, 32768), + AddrUtil.getAddresses(serverlist)); + + WorkerStat[] statArray = new WorkerStat[threads]; + Thread[] threadArray = new Thread[threads]; + + WorkerStat mainStat = new WorkerStat(); + mainStat.runs = runs * threads; + + long begin = System.currentTimeMillis(); + + for (int i = 0; i < threads; i++) { + statArray[i] = new WorkerStat(); + statArray[i].start = start + i * runs; + statArray[i].runs = runs; + threadArray[i] = new SetterThread(client, statArray[i]); + threadArray[i].start(); + } + + for (int i = 0; i < threads; i++) { + threadArray[i].join(); + } + + mainStat.setterTime = System.currentTimeMillis() - 
begin; + + begin = System.currentTimeMillis(); + + for (int i = 0; i < threads; i++) { + threadArray[i] = new GetterThread(client, statArray[i]); + threadArray[i].start(); + } + + for (int i = 0; i < threads; i++) { + threadArray[i].join(); + } + + mainStat.getterTime = System.currentTimeMillis() - begin; + + client.shutdown(); + + WorkerStat totalStat = new WorkerStat(); + + System.out.println("Thread\tstart\truns\tset time(ms)\tget time(ms)"); + for (int i = 0; i < threads; i++) { + System.out.println("" + i + "\t" + statArray[i].start + "\t" + + statArray[i].runs + "\t" + statArray[i].setterTime + + "\t\t" + statArray[i].getterTime); + + totalStat.runs = totalStat.runs + statArray[i].runs; + totalStat.setterTime = totalStat.setterTime + + statArray[i].setterTime; + totalStat.getterTime = totalStat.getterTime + + statArray[i].getterTime; + } + + System.out.println("\nAvg\t\t" + runs + "\t" + totalStat.setterTime + / threads + "\t\t" + totalStat.getterTime / threads); + + System.out.println("\nTotal\t\t" + totalStat.runs + "\t" + + totalStat.setterTime + "\t\t" + totalStat.getterTime); + System.out.println("\tReqPerSecond\tset - " + 1000 * totalStat.runs + / totalStat.setterTime + "\tget - " + 1000 * totalStat.runs + / totalStat.getterTime); + + System.out.println("\nMain\t\t" + mainStat.runs + "\t" + + mainStat.setterTime + "\t\t" + mainStat.getterTime); + System.out.println("\tReqPerSecond\tset - " + 1000 * mainStat.runs + / mainStat.setterTime + "\tget - " + 1000 * mainStat.runs + / mainStat.getterTime); + } + + private static class SetterThread extends Thread { + private static final AtomicInteger total = new AtomicInteger(0); + private static final int MAX_QUEUE = 10000; + private final MemcachedClient mc; + private final WorkerStat stat; + + SetterThread(MemcachedClient c, WorkerStat st) { + stat = st; + mc = c; + } + + @Override + public void run() { + String keyBase = "testKey"; + String object = "This is a test of an object blah blah es, " + + 
"serialization does not seem to slow things down so much. " + + "The gzip compression is horrible horrible performance, " + + "so we only use it for very large objects. " + + "I have not done any heavy benchmarking recently"; + + long begin = System.currentTimeMillis(); + for (int i = stat.start; i < stat.start + stat.runs; i++) { + mc.set("" + i + keyBase, 3600, object); + if (total.incrementAndGet() >= MAX_QUEUE) { + flush(); + } + } + long end = System.currentTimeMillis(); + + stat.setterTime = end - begin; + } + + private synchronized void flush() { + if (total.intValue() >= MAX_QUEUE) { + mc.waitForQueues(5, TimeUnit.SECONDS); + total.set(0); + } + } + } + + private static class GetterThread extends Thread { + private final MemcachedClient mc; + private final WorkerStat stat; + + GetterThread(MemcachedClient c, WorkerStat st) { + stat = st; + mc = c; + } + + @Override + public void run() { + String keyBase = "testKey"; + + long begin = System.currentTimeMillis(); + for (int i = stat.start; i < stat.start + stat.runs; i++) { + String str = (String) mc.get("" + i + keyBase); + assert str != null; + } + long end = System.currentTimeMillis(); + + stat.getterTime = end - begin; + } + } +} diff --git a/src/test/manual/net/spy/memcached/test/MemoryFullTest.java b/src/test/manual/net/spy/memcached/test/MemoryFullTest.java new file mode 100644 index 000000000..32aa7ae2c --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/MemoryFullTest.java @@ -0,0 +1,56 @@ +package net.spy.memcached.test; + +import java.util.Random; +import java.util.concurrent.ExecutionException; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.ops.OperationErrorType; +import net.spy.memcached.ops.OperationException; + +/** + * Verify what happens when the memory is full on the server. 
+ * + * This test expects a server running on localhost: + * + * memcached -U 11200 -p 11200 -m 32 -M + */ +public class MemoryFullTest { + + public static void main(String args[]) throws Exception { + // Verify assertions + try { + assert false; + throw new RuntimeException("Assertions not enabled."); + } catch (AssertionError e) { + // OK + } + + MemcachedClient c = new MemcachedClient( + AddrUtil.getAddresses("localhost:11200")); + boolean success = false; + Random r = new Random(); + byte[] somebytes = new byte[71849]; + r.nextBytes(somebytes); + try { + for (int i = 0; i < 100000; i++) { + c.set("k" + i, 3600, somebytes).get(); + } + } catch (ExecutionException e) { + assert e.getCause() instanceof OperationException; + OperationException oe = (OperationException) e.getCause(); + assert oe.getType() == OperationErrorType.SERVER; + assert oe.getMessage().equals( + "SERVER_ERROR out of memory storing object"); + success = true; + } finally { + c.shutdown(); + } + if (success) { + System.out.println(":) Failed as expected."); + } else { + System.out.println(":( Unexpected failure."); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/test/MultiNodeFailureTest.java b/src/test/manual/net/spy/memcached/test/MultiNodeFailureTest.java new file mode 100644 index 000000000..3dabb1aee --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/MultiNodeFailureTest.java @@ -0,0 +1,27 @@ +package net.spy.memcached.test; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.MemcachedClient; + +/** + * This is an attempt to reproduce a problem where a server fails during a + * series of gets. 
+ */ +public class MultiNodeFailureTest { + + public static void main(String args[]) throws Exception { + MemcachedClient c = new MemcachedClient( + AddrUtil.getAddresses("localhost:11200 localhost:11201")); + while (true) { + for (int i = 0; i < 1000; i++) { + try { + c.getBulk("blah1", "blah2", "blah3", "blah4", "blah5"); + } catch (Exception e) { + e.printStackTrace(); + } + } + System.out.println("Did a thousand."); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/test/MutateWithDefaultTest.java b/src/test/manual/net/spy/memcached/test/MutateWithDefaultTest.java new file mode 100644 index 000000000..745f9d26f --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/MutateWithDefaultTest.java @@ -0,0 +1,118 @@ +/* + * arcus-java-client : Arcus Java client + * Copyright 2010-2014 NAVER Corp. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package net.spy.memcached.test; + +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import junit.framework.Assert; +import net.spy.memcached.collection.BaseIntegrationTest; + +public class MutateWithDefaultTest extends BaseIntegrationTest { + + private String key = "MutateWithDefaultTest"; + + protected void setUp() throws Exception { + super.setUp(); + try { + mc.delete(key); + } catch (Exception e) { + e.printStackTrace(); + } + } + + protected void tearDown() throws Exception { + super.tearDown(); + } + + public void testIncr() { + try { + long v; + Future future = mc.asyncIncr(key, 1); + v = future.get(10000, TimeUnit.MILLISECONDS); + assertEquals(v, -1); + + v = mc.incr(key, 1); + assertEquals(v, -1); + + v = mc.incr(key, 10, 2); + assertEquals(v, 2); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDecr() { + try { + long v; + + Future future = mc.asyncDecr(key, 1); + v = future.get(10000, TimeUnit.MILLISECONDS); + assertEquals(v, -1); + + v = mc.decr(key, 1); + assertEquals(v, -1); + + v = mc.decr(key, 10, 100); + assertEquals(v, 100); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testIncrWithDefault() { + try { + + Future future = mc.asyncIncr(key, 1, 100, 10); + long v = future.get(10000, TimeUnit.MILLISECONDS); + + assertEquals(v, 100); + + Future future2 = mc.asyncIncr(key, 1, 100, 10); + long v2 = future2.get(10000, TimeUnit.MILLISECONDS); + + assertEquals(v2, 101); + + } catch (Exception e) { + e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } + + public void testDecrWithDefault() { + try { + + Future future = mc.asyncDecr(key, 1, 100, 10); + long v = future.get(10000, TimeUnit.MILLISECONDS); + + assertEquals(v, 100); + + Future future2 = mc.asyncDecr(key, 1, 100, 10); + long v2 = future2.get(10000, TimeUnit.MILLISECONDS); + + assertEquals(v2, 99); + + } catch (Exception e) { + 
e.printStackTrace(); + Assert.fail(e.getMessage()); + } + } +} diff --git a/src/test/manual/net/spy/memcached/test/ObserverToy.java b/src/test/manual/net/spy/memcached/test/ObserverToy.java new file mode 100644 index 000000000..ff0e3d662 --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/ObserverToy.java @@ -0,0 +1,52 @@ +package net.spy.memcached.test; + +import java.net.SocketAddress; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import net.spy.memcached.AddrUtil; +import net.spy.memcached.ConnectionObserver; +import net.spy.memcached.DefaultConnectionFactory; +import net.spy.memcached.MemcachedClient; + +/** + * This expects a server on port 11212 that's somewhat unstable so it can report + * and what-not. + */ +public class ObserverToy { + public static void main(String args[]) throws Exception { + final ConnectionObserver obs = new ConnectionObserver() { + public void connectionEstablished(SocketAddress sa, + int reconnectCount) { + System.out.println("*** Established: " + sa + " count=" + + reconnectCount); + } + + public void connectionLost(SocketAddress sa) { + System.out.println("*** Lost connection: " + sa); + } + + }; + + MemcachedClient c = new MemcachedClient(new DefaultConnectionFactory() { + + @Override + public Collection getInitialObservers() { + return Collections.singleton(obs); + } + + @Override + public boolean isDaemon() { + return false; + } + + }, AddrUtil.getAddresses("localhost:11212")); + + while (true) { + c.waitForQueues(1, TimeUnit.SECONDS); + Thread.sleep(1000); + } + } + +} diff --git a/src/test/manual/net/spy/memcached/test/SASLConnectReconnect.java b/src/test/manual/net/spy/memcached/test/SASLConnectReconnect.java new file mode 100644 index 000000000..2cb67053c --- /dev/null +++ b/src/test/manual/net/spy/memcached/test/SASLConnectReconnect.java @@ -0,0 +1,148 @@ +package net.spy.memcached.test; + +import java.io.IOException; +import java.net.InetSocketAddress; +import 
java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.logging.ConsoleHandler; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.Logger; +import net.spy.memcached.AddrUtil; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.ConnectionFactoryBuilder.Protocol; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.OperationTimeoutException; +import net.spy.memcached.auth.AuthDescriptor; +import net.spy.memcached.auth.PlainCallbackHandler; +import static org.junit.Assert.*; + +/** + * A very simple test of using SASL PLAIN auth and ensuring that operations are + * not sent without being authenticated, if a MemcachedClient is told (via the + * ConnectionFactoryBuilder) that it should be authenticating. + * + * @author Matt Ingenthron + */ +public class SASLConnectReconnect { + + private MemcachedClient mc = null; + + SASLConnectReconnect(String username, String password, String host) { + + AuthDescriptor ad = new AuthDescriptor(new String[] { "PLAIN" }, + new PlainCallbackHandler(username, password)); + try { + List addresses = AddrUtil.getAddresses(host); + mc = new MemcachedClient( + new ConnectionFactoryBuilder().setProtocol(Protocol.BINARY) + .setAuthDescriptor(ad).build(), addresses); + } catch (IOException ex) { + System.err + .println("Couldn't create a connection, bailing out: \nIOException " + + ex.getMessage()); + if (mc != null) { + mc.shutdown(); + } + } + + } + + /** + * + * The intent of this test is to verify that if MemcachedClient object is + * set up for SASL Auth the operations are only sent after SASL auth has + * been completed, even in a reconnect case. + * + * At the moment, I use external start/restart of the memcached and external + * verification that the behavior was correct. 
+ * + * Example arguments for running this test: username password + * 127.0.0.1:11211 10000 + * + * The initial run does it's thing, then pauses for 30 seconds, while I + * bounce the server. Then it runs the second pass. + * + * @param args + * the command line arguments + * @throws InterruptedException + */ + public static void main(String[] args) throws InterruptedException { + if (args.length != 4) { + System.err + .println("Usage example:\nQuickAuthLoad user password localhost:11212 10000"); + System.exit(1); + } + SASLConnectReconnect m = new SASLConnectReconnect(args[0], args[1], + args[2]); + + Logger.getLogger("net.spy.memcached").setLevel(Level.FINEST); + + // get the top Logger, create it if it doesn't exist, set to FINEST + Logger topLogger = java.util.logging.Logger.getLogger(""); + + Handler consoleHandler = null; + for (Handler handler : topLogger.getHandlers()) { + if (handler instanceof ConsoleHandler) { + consoleHandler = handler; + break; + } + } + + if (consoleHandler == null) { + consoleHandler = new ConsoleHandler(); + topLogger.addHandler(consoleHandler); + } + consoleHandler.setLevel(java.util.logging.Level.FINEST); + + m.verifySetAndGet(); + System.err.println("Pass one done."); + Thread.sleep(60000); + m.verifySetAndGet2(Integer.parseInt(args[3])); + System.err.println("Pass two done."); + + } + + /** + * verify set and get go to the right place + */ + public void verifySetAndGet() { + int iterations = 20; + for (int i = 0; i < iterations; i++) { + mc.set("test" + i, 0, "test" + i); + } + + for (int i = 0; i < iterations; i++) { + Object res = mc.get("test" + i); + assertEquals("test" + i, res); + } + } + + /** + * verify set and get go to the right place + */ + public void verifySetAndGet2(int iterations) { + try { + for (int i = 0; i <= iterations; i++) { + mc.set("me" + i, 0, "me" + i); + } + + for (int i = 0; i < iterations; i++) { + try { + Object res = mc.get("me" + i); + if (res == null) { + System.err.println("me" + i + " was not 
in the cache."); + } else { + assertEquals("me" + i, res); + } + } catch (OperationTimeoutException ex) { + System.err.println("Operation timeed out, continuing."); + } + } + mc.shutdown(1, TimeUnit.SECONDS); + } catch (Exception ex) { + System.err.println("Bailing out " + ex.toString() + "\n"); + ex.printStackTrace(); + } + } +} diff --git a/src/test/resources/log4j.xml b/src/test/resources/log4j.xml new file mode 100644 index 000000000..199d56640 --- /dev/null +++ b/src/test/resources/log4j.xml @@ -0,0 +1,85 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/xdocs/index.xml b/xdocs/index.xml new file mode 100644 index 000000000..60e45eead --- /dev/null +++ b/xdocs/index.xml @@ -0,0 +1,24 @@ + + + + + + + Dustin Sallings + java memcached client + + + +
+

+ This project is now primarily hosted at + google code + hosting with the source at github. +

+

+ Changelog +

+
+ +