/*
 * ====================================================================
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * ====================================================================
 *
 * This software consists of voluntary contributions made by many
 * individuals on behalf of the Apache Software Foundation.  For more
 * information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 *
 */
package org.apache.http.impl.client.cache;

import org.apache.http.annotation.ThreadSafe;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Implements a bounded failure cache. The oldest entries are discarded when
 * the maximum size is exceeded.
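 * <p>
 * Illustrative usage sketch (the identifier is an arbitrary example key; only
 * the methods implemented by this class are used):
 * <pre>{@code
 * FailureCache failureCache = new DefaultFailureCache();
 * failureCache.increaseErrorCount("http://example.com/resource");
 * int errorCount = failureCache.getErrorCount("http://example.com/resource");
 * failureCache.resetErrorCount("http://example.com/resource");
 * }</pre>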
 *
 * @since 4.3
 */
@ThreadSafe
public class DefaultFailureCache implements FailureCache {

    /** Default maximum number of entries kept before the oldest one is evicted. */
    static final int DEFAULT_MAX_SIZE = 1000;

    /** Maximum number of attempts to update an entry before giving up under heavy contention. */
    static final int MAX_UPDATE_TRIES = 10;

    private final int maxSize;
    private final ConcurrentMap<String, FailureCacheValue> storage;

    /**
     * Creates a new failure cache with a maximum size of
     * {@link #DEFAULT_MAX_SIZE}.
     */
    public DefaultFailureCache() {
        this(DEFAULT_MAX_SIZE);
    }

    /**
     * Creates a new failure cache with the specified maximum size.
     * @param maxSize the maximum number of entries the cache should store
     */
    public DefaultFailureCache(final int maxSize) {
        this.maxSize = maxSize;
        this.storage = new ConcurrentHashMap<String, FailureCacheValue>();
    }

    @Override
    public int getErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        final FailureCacheValue storedErrorCode = storage.get(identifier);
        return storedErrorCode != null ? storedErrorCode.getErrorCount() : 0;
    }

    @Override
    public void resetErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        storage.remove(identifier);
    }

    @Override
    public void increaseErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        updateValue(identifier);
        removeOldestEntryIfMapSizeExceeded();
    }

    private void updateValue(final String identifier) {
        /*
         * Due to concurrency it is possible that another thread modifies the
         * entry before we can write back our updated value, so we keep
         * retrying until the update succeeds.
         *
         * If there is heavy contention on a single identifier, a thread might
         * starve. It therefore gives up after a fixed number of failed update
         * attempts.
         */
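        // ConcurrentMap guarantees used below: putIfAbsent returns null only when no
        // entry was present and the new value has been stored; replace(key, oldValue,
        // newValue) succeeds only while the entry still maps to oldValue, so it acts
        // as a compare-and-set.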
        for (int i = 0; i < MAX_UPDATE_TRIES; i++) {
            final FailureCacheValue oldValue = storage.get(identifier);
            if (oldValue == null) {
                // no entry yet: try to insert an initial error count of 1
                final FailureCacheValue newValue = new FailureCacheValue(identifier, 1);
                if (storage.putIfAbsent(identifier, newValue) == null) {
                    return;
                }
            } else {
                final int errorCount = oldValue.getErrorCount();
                if (errorCount == Integer.MAX_VALUE) {
                    return;
                }
                // increment the error count, but only if the entry has not changed in the meantime
                final FailureCacheValue newValue = new FailureCacheValue(identifier, errorCount + 1);
                if (storage.replace(identifier, oldValue, newValue)) {
                    return;
                }
            }
        }
    }

    private void removeOldestEntryIfMapSizeExceeded() {
        // Best-effort eviction: the size check and the removal are not atomic, so the
        // map may transiently exceed maxSize. The conditional remove leaves an entry in
        // place if it has been updated since it was selected for eviction.
        if (storage.size() > maxSize) {
            final FailureCacheValue valueWithOldestTimestamp = findValueWithOldestTimestamp();
            if (valueWithOldestTimestamp != null) {
                storage.remove(valueWithOldestTimestamp.getKey(), valueWithOldestTimestamp);
            }
        }
    }

    private FailureCacheValue findValueWithOldestTimestamp() {
        // linear scan over all entries to find the one with the oldest creation time
        long oldestTimestamp = Long.MAX_VALUE;
        FailureCacheValue oldestValue = null;
        for (final Map.Entry<String, FailureCacheValue> storageEntry : storage.entrySet()) {
            final FailureCacheValue value = storageEntry.getValue();
            final long creationTimeInNanos = value.getCreationTimeInNanos();
            if (creationTimeInNanos < oldestTimestamp) {
                oldestTimestamp = creationTimeInNanos;
                oldestValue = value;
            }
        }
        return oldestValue;
    }
}