001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hdfs.server.blockmanagement;
019
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;

import com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
031
032/**
033 * A Datanode has one or more storages. A storage in the Datanode is represented
034 * by this class.
035 */
036public class DatanodeStorageInfo {
037  public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
038
039  public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
040    return toDatanodeInfos(Arrays.asList(storages));
041  }
042  static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
043    final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
044    for(int i = 0; i < storages.size(); i++) {
045      datanodes[i] = storages.get(i).getDatanodeDescriptor();
046    }
047    return datanodes;
048  }
049
050  static DatanodeDescriptor[] toDatanodeDescriptors(
051      DatanodeStorageInfo[] storages) {
052    DatanodeDescriptor[] datanodes = new DatanodeDescriptor[storages.length];
053    for (int i = 0; i < storages.length; ++i) {
054      datanodes[i] = storages[i].getDatanodeDescriptor();
055    }
056    return datanodes;
057  }
058
059  public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
060    String[] storageIDs = new String[storages.length];
061    for(int i = 0; i < storageIDs.length; i++) {
062      storageIDs[i] = storages[i].getStorageID();
063    }
064    return storageIDs;
065  }
066
067  public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
068    StorageType[] storageTypes = new StorageType[storages.length];
069    for(int i = 0; i < storageTypes.length; i++) {
070      storageTypes[i] = storages[i].getStorageType();
071    }
072    return storageTypes;
073  }
074
075  public void updateFromStorage(DatanodeStorage storage) {
076    state = storage.getState();
077    storageType = storage.getStorageType();
078  }
079
080  /**
081   * Iterates over the list of blocks belonging to the data-node.
082   */
083  class BlockIterator implements Iterator<BlockInfoContiguous> {
084    private BlockInfoContiguous current;
085
086    BlockIterator(BlockInfoContiguous head) {
087      this.current = head;
088    }
089
090    public boolean hasNext() {
091      return current != null;
092    }
093
094    public BlockInfoContiguous next() {
095      BlockInfoContiguous res = current;
096      current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
097      return res;
098    }
099
100    public void remove() {
101      throw new UnsupportedOperationException("Sorry. can't remove.");
102    }
103  }
104
105  private final DatanodeDescriptor dn;
106  private final String storageID;
107  private StorageType storageType;
108  private State state;
109
110  private long capacity;
111  private long dfsUsed;
112  private volatile long remaining;
113  private long blockPoolUsed;
114
115  private volatile BlockInfoContiguous blockList = null;
116  private int numBlocks = 0;
117
118  // The ID of the last full block report which updated this storage.
119  private long lastBlockReportId = 0;
120
121  /** The number of block reports received */
122  private int blockReportCount = 0;
123
124  /**
125   * Set to false on any NN failover, and reset to true
126   * whenever a block report is received.
127   */
128  private boolean heartbeatedSinceFailover = false;
129
130  /**
131   * At startup or at failover, the storages in the cluster may have pending
132   * block deletions from a previous incarnation of the NameNode. The block
133   * contents are considered as stale until a block report is received. When a
134   * storage is considered as stale, the replicas on it are also considered as
135   * stale. If any block has at least one stale replica, then no invalidations
136   * will be processed for this block. See HDFS-1972.
137   */
138  private boolean blockContentsStale = true;
139
140  DatanodeStorageInfo(DatanodeDescriptor dn, DatanodeStorage s) {
141    this.dn = dn;
142    this.storageID = s.getStorageID();
143    this.storageType = s.getStorageType();
144    this.state = s.getState();
145  }
146
147  int getBlockReportCount() {
148    return blockReportCount;
149  }
150
151  void setBlockReportCount(int blockReportCount) {
152    this.blockReportCount = blockReportCount;
153  }
154
155  boolean areBlockContentsStale() {
156    return blockContentsStale;
157  }
158
159  void markStaleAfterFailover() {
160    heartbeatedSinceFailover = false;
161    blockContentsStale = true;
162  }
163
164  void receivedHeartbeat(StorageReport report) {
165    updateState(report);
166    heartbeatedSinceFailover = true;
167  }
168
169  void receivedBlockReport() {
170    if (heartbeatedSinceFailover) {
171      blockContentsStale = false;
172    }
173    blockReportCount++;
174  }
175
176  @VisibleForTesting
177  public void setUtilizationForTesting(long capacity, long dfsUsed,
178                      long remaining, long blockPoolUsed) {
179    this.capacity = capacity;
180    this.dfsUsed = dfsUsed;
181    this.remaining = remaining;
182    this.blockPoolUsed = blockPoolUsed;
183  }
184
185  long getLastBlockReportId() {
186    return lastBlockReportId;
187  }
188
189  void setLastBlockReportId(long lastBlockReportId) {
190    this.lastBlockReportId = lastBlockReportId;
191  }
192
193  State getState() {
194    return this.state;
195  }
196
197  void setState(State state) {
198    this.state = state;
199  }
200
201  boolean areBlocksOnFailedStorage() {
202    return getState() == State.FAILED && numBlocks != 0;
203  }
204
205  String getStorageID() {
206    return storageID;
207  }
208
209  public StorageType getStorageType() {
210    return storageType;
211  }
212
213  long getCapacity() {
214    return capacity;
215  }
216
217  long getDfsUsed() {
218    return dfsUsed;
219  }
220
221  long getRemaining() {
222    return remaining;
223  }
224
225  long getBlockPoolUsed() {
226    return blockPoolUsed;
227  }
228
229  public AddBlockResult addBlock(BlockInfoContiguous b) {
230    // First check whether the block belongs to a different storage
231    // on the same DN.
232    AddBlockResult result = AddBlockResult.ADDED;
233    DatanodeStorageInfo otherStorage =
234        b.findStorageInfo(getDatanodeDescriptor());
235
236    if (otherStorage != null) {
237      if (otherStorage != this) {
238        // The block belongs to a different storage. Remove it first.
239        otherStorage.removeBlock(b);
240        result = AddBlockResult.REPLACED;
241      } else {
242        // The block is already associated with this storage.
243        return AddBlockResult.ALREADY_EXIST;
244      }
245    }
246
247    // add to the head of the data-node list
248    b.addStorage(this);
249    blockList = b.listInsert(blockList, this);
250    numBlocks++;
251    return result;
252  }
253
254  public boolean removeBlock(BlockInfoContiguous b) {
255    blockList = b.listRemove(blockList, this);
256    if (b.removeStorage(this)) {
257      numBlocks--;
258      return true;
259    } else {
260      return false;
261    }
262  }
263
264  int numBlocks() {
265    return numBlocks;
266  }
267  
268  Iterator<BlockInfoContiguous> getBlockIterator() {
269    return new BlockIterator(blockList);
270
271  }
272
273  /**
274   * Move block to the head of the list of blocks belonging to the data-node.
275   * @return the index of the head of the blockList
276   */
277  int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) {
278    blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
279    return curIndex;
280  }
281
282  /**
283   * Used for testing only
284   * @return the head of the blockList
285   */
286  @VisibleForTesting
287  BlockInfoContiguous getBlockListHeadForTesting(){
288    return blockList;
289  }
290
291  void updateState(StorageReport r) {
292    capacity = r.getCapacity();
293    dfsUsed = r.getDfsUsed();
294    remaining = r.getRemaining();
295    blockPoolUsed = r.getBlockPoolUsed();
296  }
297
298  public DatanodeDescriptor getDatanodeDescriptor() {
299    return dn;
300  }
301
302  /** Increment the number of blocks scheduled for each given storage */ 
303  public static void incrementBlocksScheduled(DatanodeStorageInfo... storages) {
304    for (DatanodeStorageInfo s : storages) {
305      s.getDatanodeDescriptor().incrementBlocksScheduled(s.getStorageType());
306    }
307  }
308
309  @Override
310  public boolean equals(Object obj) {
311    if (this == obj) {
312      return true;
313    } else if (obj == null || !(obj instanceof DatanodeStorageInfo)) {
314      return false;
315    }
316    final DatanodeStorageInfo that = (DatanodeStorageInfo)obj;
317    return this.storageID.equals(that.storageID);
318  }
319
320  @Override
321  public int hashCode() {
322    return storageID.hashCode();
323  }
324
325  @Override
326  public String toString() {
327    return "[" + storageType + "]" + storageID + ":" + state + ":" + dn;
328  }
329  
330  StorageReport toStorageReport() {
331    return new StorageReport(
332        new DatanodeStorage(storageID, state, storageType),
333        false, capacity, dfsUsed, remaining, blockPoolUsed);
334  }
335
336  static Iterable<StorageType> toStorageTypes(
337      final Iterable<DatanodeStorageInfo> infos) {
338    return new Iterable<StorageType>() {
339        @Override
340        public Iterator<StorageType> iterator() {
341          return new Iterator<StorageType>() {
342            final Iterator<DatanodeStorageInfo> i = infos.iterator();
343            @Override
344            public boolean hasNext() {return i.hasNext();}
345            @Override
346            public StorageType next() {return i.next().getStorageType();}
347            @Override
348            public void remove() {
349              throw new UnsupportedOperationException();
350            }
351          };
352        }
353      };
354  }
355
356  /** @return the first {@link DatanodeStorageInfo} corresponding to
357   *          the given datanode
358   */
359  static DatanodeStorageInfo getDatanodeStorageInfo(
360      final Iterable<DatanodeStorageInfo> infos,
361      final DatanodeDescriptor datanode) {
362    if (datanode == null) {
363      return null;
364    }
365    for(DatanodeStorageInfo storage : infos) {
366      if (storage.getDatanodeDescriptor() == datanode) {
367        return storage;
368      }
369    }
370    return null;
371  }
372
373  static enum AddBlockResult {
374    ADDED, REPLACED, ALREADY_EXIST;
375  }
376}