/*
 * ====================================================================
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 * ====================================================================
 *
 * This software consists of voluntary contributions made by many
 * individuals on behalf of the Apache Software Foundation.  For more
 * information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 *
 */
package org.apache.http.impl.client.cache;

import org.apache.http.annotation.ThreadSafe;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Implements a bounded failure cache. The oldest entries are discarded when
 * the maximum size is exceeded.
 *
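 * <p>
 * A minimal, illustrative usage sketch (the identifier below is a
 * hypothetical example):
 * </p>
 * <pre>{@code
 * FailureCache failureCache = new DefaultFailureCache(100);
 * failureCache.increaseErrorCount("http://example.com/resource");
 * int errorCount = failureCache.getErrorCount("http://example.com/resource");
 * failureCache.resetErrorCount("http://example.com/resource");
 * }</pre>
 *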
 * @since 4.3
 */
@ThreadSafe
public class DefaultFailureCache implements FailureCache {

    static final int DEFAULT_MAX_SIZE = 1000;
    static final int MAX_UPDATE_TRIES = 10;

    private final int maxSize;
    private final ConcurrentMap<String, FailureCacheValue> storage;

    /**
     * Creates a new failure cache with a maximum size of
     * {@link #DEFAULT_MAX_SIZE}.
     */
    public DefaultFailureCache() {
        this(DEFAULT_MAX_SIZE);
    }

    /**
     * Creates a new failure cache with the specified maximum size.
     *
     * @param maxSize the maximum number of entries the cache should store
     */
    public DefaultFailureCache(final int maxSize) {
        this.maxSize = maxSize;
        this.storage = new ConcurrentHashMap<String, FailureCacheValue>();
    }

    public int getErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        final FailureCacheValue storedErrorCode = storage.get(identifier);
        return storedErrorCode != null ? storedErrorCode.getErrorCount() : 0;
    }

    public void resetErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        storage.remove(identifier);
    }

    public void increaseErrorCount(final String identifier) {
        if (identifier == null) {
            throw new IllegalArgumentException("identifier may not be null");
        }
        updateValue(identifier);
        removeOldestEntryIfMapSizeExceeded();
    }

    private void updateValue(final String identifier) {
        /*
         * Due to concurrency it is possible that another thread modifies the
         * entry before we can write back our updated value, so we keep
         * retrying until the update succeeds.
         *
         * If there is heavy contention on the identifier a thread might
         * starve, so it gives up after a bounded number of failed update
         * attempts.
         */
        for (int i = 0; i < MAX_UPDATE_TRIES; i++) {
            final FailureCacheValue oldValue = storage.get(identifier);
            if (oldValue == null) {
                final FailureCacheValue newValue = new FailureCacheValue(identifier, 1);
                if (storage.putIfAbsent(identifier, newValue) == null) {
                    return;
                }
            }
            else {
                final int errorCount = oldValue.getErrorCount();
                if (errorCount == Integer.MAX_VALUE) {
                    return;
                }
                final FailureCacheValue newValue = new FailureCacheValue(identifier, errorCount + 1);
                if (storage.replace(identifier, oldValue, newValue)) {
                    return;
                }
            }
        }
    }

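    /*
     * Best-effort eviction: the size check, the scan for the oldest entry
     * and the conditional removal below are not atomic, so under heavy
     * concurrency the map may briefly exceed maxSize, and the removal is
     * skipped if the chosen entry has been updated in the meantime.
     */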
    private void removeOldestEntryIfMapSizeExceeded() {
        if (storage.size() > maxSize) {
            final FailureCacheValue valueWithOldestTimestamp = findValueWithOldestTimestamp();
            if (valueWithOldestTimestamp != null) {
                storage.remove(valueWithOldestTimestamp.getKey(), valueWithOldestTimestamp);
            }
        }
    }

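    /*
     * Linear scan over all entries to find the one with the smallest creation
     * timestamp; the cost is proportional to the current number of entries.
     */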
    private FailureCacheValue findValueWithOldestTimestamp() {
        long oldestTimestamp = Long.MAX_VALUE;
        FailureCacheValue oldestValue = null;
        for (final Map.Entry<String, FailureCacheValue> storageEntry : storage.entrySet()) {
            final FailureCacheValue value = storageEntry.getValue();
            final long creationTimeInNanos = value.getCreationTimeInNanos();
            if (creationTimeInNanos < oldestTimestamp) {
                oldestTimestamp = creationTimeInNanos;
                oldestValue = value;
            }
        }
        return oldestValue;
    }
}