001/** 002 * Licensed to the Apache Software Foundation (ASF) under one 003 * or more contributor license agreements. See the NOTICE file 004 * distributed with this work for additional information 005 * regarding copyright ownership. The ASF licenses this file 006 * to you under the Apache License, Version 2.0 (the 007 * "License"); you may not use this file except in compliance 008 * with the License. You may obtain a copy of the License at 009 * 010 * http://www.apache.org/licenses/LICENSE-2.0 011 * 012 * Unless required by applicable law or agreed to in writing, software 013 * distributed under the License is distributed on an "AS IS" BASIS, 014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 015 * See the License for the specific language governing permissions and 016 * limitations under the License. 017 */ 018 019package org.apache.hadoop.hdfs.server.namenode; 020 021import java.io.IOException; 022import java.io.InputStream; 023import java.io.OutputStream; 024import java.util.ArrayList; 025import java.util.Iterator; 026import java.util.List; 027import java.util.Map; 028 029import org.apache.commons.logging.Log; 030import org.apache.commons.logging.LogFactory; 031import org.apache.hadoop.HadoopIllegalArgumentException; 032import org.apache.hadoop.classification.InterfaceAudience; 033import org.apache.hadoop.fs.permission.AclEntry; 034import org.apache.hadoop.fs.permission.AclEntryScope; 035import org.apache.hadoop.fs.permission.AclEntryType; 036import org.apache.hadoop.fs.permission.FsAction; 037import org.apache.hadoop.fs.permission.FsPermission; 038import org.apache.hadoop.fs.permission.PermissionStatus; 039import org.apache.hadoop.fs.StorageType; 040import org.apache.hadoop.fs.XAttr; 041import org.apache.hadoop.hdfs.protocol.Block; 042import org.apache.hadoop.hdfs.protocol.HdfsConstants; 043import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; 044import org.apache.hadoop.hdfs.protocolPB.PBHelper; 045import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.LoaderContext;
import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf.SaverContext;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeDirectorySection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrCompactProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.XAttrFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeEntryProto;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.QuotaByStorageTypeFeatureProto;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.util.EnumCounters;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;

/**
 * Serializes and deserializes the INode-related sections of the
 * protobuf-based fsimage: the INODE section (the inodes themselves), the
 * INODE_DIR section (parent/child relationships) and the
 * FILES_UNDERCONSTRUCTION section (lease information). The nested
 * {@link Loader} reads these sections back into the namesystem; the nested
 * {@link Saver} writes them out during a namespace save.
 *
 * Permissions, ACL entries and xattr names are bit-packed into primitive
 * fields (see the mask/offset constants below and the comments in
 * fsimage.proto); string values are deduplicated through a string table.
 */
@InterfaceAudience.Private
public final class FSImageFormatPBINode {
  // A PermissionStatus is packed into one long:
  //   bits 40..63 = user string-table id (24 bits)
  //   bits 16..39 = group string-table id (24 bits)
  //   bits  0..15 = FsPermission short
  private final static long USER_GROUP_STRID_MASK = (1 << 24) - 1;
  private final static int USER_STRID_OFFSET = 40;
  private final static int GROUP_STRID_OFFSET = 16;
  private static final Log LOG = LogFactory.getLog(FSImageFormatPBINode.class);

  // An ACL entry is packed into one int:
  //   bits 6..29 = name string-table id (24 bits)
  //   bit  5     = scope ordinal (1 bit)
  //   bits 3..4  = type ordinal (2 bits)
  //   bits 0..2  = permission (FsAction ordinal, 3 bits)
  private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
  private static final int ACL_ENTRY_NAME_OFFSET = 6;
  private static final int ACL_ENTRY_TYPE_OFFSET = 3;
  private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
  private static final int ACL_ENTRY_PERM_MASK = 7;
  private static final int ACL_ENTRY_TYPE_MASK = 3;
  private static final int ACL_ENTRY_SCOPE_MASK = 1;
  // Cached enum value arrays: ordinals stored in the image are decoded by
  // direct array index instead of calling values() per entry.
  private static final FsAction[] FSACTION_VALUES = FsAction.values();
  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES = AclEntryScope
      .values();
  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES = AclEntryType
      .values();

  // An xattr "name" field is packed into one int:
  //   bits 30..31 = low 2 bits of the namespace ordinal
  //   bits 6..29  = name string-table id (24 bits)
  //   bit  5      = third (extension) bit of the namespace ordinal
  private static final int XATTR_NAMESPACE_MASK = 3;
  private static final int XATTR_NAMESPACE_OFFSET = 30;
  private static final int XATTR_NAME_MASK = (1 << 24) - 1;
  private static final int XATTR_NAME_OFFSET = 6;

  /* See the comments in fsimage.proto for an explanation of the following. */
  private static final int XATTR_NAMESPACE_EXT_OFFSET = 5;
  private static final int XATTR_NAMESPACE_EXT_MASK = 1;

  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
      XAttr.NameSpace.values();


  /**
   * Reads the INode-related fsimage sections and reconstructs the in-memory
   * namespace (inode map, directory tree, block map and lease manager state).
   */
  public final static class Loader {
    /**
     * Decodes a bit-packed permission long (see the *_STRID_* constants)
     * back into a {@link PermissionStatus}, resolving user/group names
     * through the image's string table.
     */
    public static PermissionStatus loadPermission(long id,
        final String[] stringTable) {
      // Low 16 bits are the raw FsPermission short.
      short perm = (short) (id & ((1 << GROUP_STRID_OFFSET) - 1));
      int gsid = (int) ((id >> GROUP_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      int usid = (int) ((id >> USER_STRID_OFFSET) & USER_GROUP_STRID_MASK);
      return new PermissionStatus(stringTable[usid], stringTable[gsid],
          new FsPermission(perm));
    }

    /**
     * Decodes the bit-packed ACL entries of an {@link AclFeatureProto} (see
     * the ACL_ENTRY_* constants) into an immutable list of
     * {@link AclEntry}s.
     */
    public static ImmutableList<AclEntry> loadAclEntries(
        AclFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<AclEntry> b = ImmutableList.builder();
      for (int v : proto.getEntriesList()) {
        int p = v & ACL_ENTRY_PERM_MASK;
        int t = (v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK;
        int s = (v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK;
        int nid = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
        String name = stringTable[nid];
        b.add(new AclEntry.Builder().setName(name)
            .setPermission(FSACTION_VALUES[p])
            .setScope(ACL_ENTRY_SCOPE_VALUES[s])
            .setType(ACL_ENTRY_TYPE_VALUES[t]).build());
      }
      return b.build();
    }

    /**
     * Decodes an {@link XAttrFeatureProto} into an immutable list of
     * {@link XAttr}s. The namespace ordinal is reassembled from its two
     * packed pieces (2 low bits plus 1 extension bit; see fsimage.proto).
     */
    public static ImmutableList<XAttr> loadXAttrs(
        XAttrFeatureProto proto, final String[] stringTable) {
      ImmutableList.Builder<XAttr> b = ImmutableList.builder();
      for (XAttrCompactProto xAttrCompactProto : proto.getXAttrsList()) {
        int v = xAttrCompactProto.getName();
        int nid = (v >> XATTR_NAME_OFFSET) & XATTR_NAME_MASK;
        int ns = (v >> XATTR_NAMESPACE_OFFSET) & XATTR_NAMESPACE_MASK;
        // Prepend the extension bit so namespaces with ordinal >= 4 decode
        // correctly.
        ns |=
            ((v >> XATTR_NAMESPACE_EXT_OFFSET) & XATTR_NAMESPACE_EXT_MASK) << 2;
        String name = stringTable[nid];
        byte[] value = null;
        if (xAttrCompactProto.getValue() != null) {
          value = xAttrCompactProto.getValue().toByteArray();
        }
        b.add(new XAttr.Builder().setNameSpace(XATTR_NAMESPACE_VALUES[ns])
            .setName(name).setValue(value).build());
      }

      return b.build();
    }

    /**
     * Converts the per-storage-type quota entries of a
     * {@link QuotaByStorageTypeFeatureProto} into immutable
     * {@link QuotaByStorageTypeEntry}s.
     */
    public static ImmutableList<QuotaByStorageTypeEntry> loadQuotaByStorageTypeEntries(
        QuotaByStorageTypeFeatureProto proto) {
      ImmutableList.Builder<QuotaByStorageTypeEntry> b = ImmutableList.builder();
      for (QuotaByStorageTypeEntryProto quotaEntry : proto.getQuotasList()) {
        StorageType type = PBHelper.convertStorageType(quotaEntry.getStorageType());
        long quota = quotaEntry.getQuota();
        b.add(new QuotaByStorageTypeEntry.Builder().setStorageType(type)
            .setQuota(quota).build());
      }
      return b.build();
    }

    /**
     * Materializes an {@link INodeDirectory} from its serialized form,
     * restoring permissions, namespace/space quotas, per-type quotas, ACLs
     * and xattrs.
     */
    public static INodeDirectory loadINodeDirectory(INodeSection.INode n,
        LoaderContext state) {
      assert n.getType() == INodeSection.INode.Type.DIRECTORY;
      INodeSection.INodeDirectory d = n.getDirectory();

      final PermissionStatus permissions = loadPermission(d.getPermission(),
          state.getStringTable());
      final INodeDirectory dir = new INodeDirectory(n.getId(), n.getName()
          .toByteArray(), permissions, d.getModificationTime());
      final long nsQuota = d.getNsQuota(), dsQuota = d.getDsQuota();
      // A non-negative quota means a quota was explicitly set on this
      // directory, so attach the quota feature.
      if (nsQuota >= 0 || dsQuota >= 0) {
        dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.Builder().
            nameSpaceQuota(nsQuota).storageSpaceQuota(dsQuota).build());
      }
      EnumCounters<StorageType> typeQuotas = null;
      if (d.hasTypeQuotas()) {
        ImmutableList<QuotaByStorageTypeEntry> qes =
            loadQuotaByStorageTypeEntries(d.getTypeQuotas());
        // Start all types at QUOTA_RESET (unset), then fill in the stored
        // entries for types that support quotas.
        typeQuotas = new EnumCounters<StorageType>(StorageType.class,
            HdfsConstants.QUOTA_RESET);
        for (QuotaByStorageTypeEntry qe : qes) {
          if (qe.getQuota() >= 0 && qe.getStorageType() != null &&
              qe.getStorageType().supportTypeQuota()) {
            typeQuotas.set(qe.getStorageType(), qe.getQuota());
          }
        }

        if (typeQuotas.anyGreaterOrEqual(0)) {
          // Reuse the quota feature added above for ns/ds quotas if present;
          // otherwise add one carrying only the type quotas.
          DirectoryWithQuotaFeature q = dir.getDirectoryWithQuotaFeature();
          if (q == null) {
            dir.addDirectoryWithQuotaFeature(new DirectoryWithQuotaFeature.
                Builder().typeQuotas(typeQuotas).build());
          } else {
            q.setQuota(typeQuotas);
          }
        }
      }

      if (d.hasAcl()) {
        int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
            d.getAcl(), state.getStringTable()));
        dir.addAclFeature(new AclFeature(entries));
      }
      if (d.hasXAttrs()) {
        dir.addXAttrFeature(new XAttrFeature(
            loadXAttrs(d.getXAttrs(), state.getStringTable())));
      }
      return dir;
    }

    /**
     * Registers every block of {@code file} with the block manager and
     * stores back the (possibly canonicalized) block returned by
     * {@link BlockManager#addBlockCollection}.
     */
    public static void updateBlocksMap(INodeFile file, BlockManager bm) {
      // Add file->block mapping
      final BlockInfoContiguous[] blocks = file.getBlocks();
      if (blocks != null) {
        for (int i = 0; i < blocks.length; i++) {
          file.setBlock(i, bm.addBlockCollection(blocks[i], file));
        }
      }
    }

    private final FSDirectory dir;
    private final FSNamesystem fsn;
    private final FSImageFormatProtobuf.Loader parent;

    Loader(FSNamesystem fsn, final FSImageFormatProtobuf.Loader parent) {
      this.fsn = fsn;
      this.dir = fsn.dir;
      this.parent = parent;
    }

    /**
     * Loads the INODE_DIR section: for each directory entry, links the
     * already-loaded child inodes (by id) and inode references (by index
     * into the shared reference list) under their parent.
     */
    void loadINodeDirectorySection(InputStream in) throws IOException {
      final List<INodeReference> refList = parent.getLoaderContext()
          .getRefList();
      while (true) {
        INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
            .parseDelimitedFrom(in);
        // note that in is a LimitedInputStream
        if (e == null) {
          break;
        }
        INodeDirectory p = dir.getInode(e.getParent()).asDirectory();
        for (long id : e.getChildrenList()) {
          INode child = dir.getInode(id);
          addToParent(p, child);
        }
        for (int refId : e.getRefChildrenList()) {
          INodeReference ref = refList.get(refId);
          addToParent(p, ref);
        }
      }
    }

    /**
     * Loads the INODE section: restores the last allocated inode id, then
     * reads every serialized inode, handling the root inode specially and
     * adding all others to the inode map.
     */
    void loadINodeSection(InputStream in) throws IOException {
      INodeSection s = INodeSection.parseDelimitedFrom(in);
      fsn.dir.resetLastInodeId(s.getLastInodeId());
      LOG.info("Loading " + s.getNumInodes() + " INodes.");
      for (int i = 0; i < s.getNumInodes(); ++i) {
        INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
        if (p.getId() == INodeId.ROOT_INODE_ID) {
          loadRootINode(p);
        } else {
          INode n = loadINode(p);
          dir.addToInodeMap(n);
        }
      }
    }

    /**
     * Load the under-construction files section, and update the lease map
     */
    void loadFilesUnderConstructionSection(InputStream in) throws IOException {
      while (true) {
        FileUnderConstructionEntry entry = FileUnderConstructionEntry
            .parseDelimitedFrom(in);
        if (entry == null) {
          break;
        }
        // update the lease manager
        INodeFile file = dir.getInode(entry.getInodeId()).asFile();
        FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
        Preconditions.checkState(uc != null); // file must be under-construction
        fsn.leaseManager.addLease(uc.getClientName(), entry.getFullPath());
      }
    }

    /**
     * Links {@code child} under {@code parent}, rejecting reserved names at
     * the root, caching the child's name, and (for files) registering its
     * blocks with the block manager.
     */
    private void addToParent(INodeDirectory parent, INode child) {
      if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
        throw new HadoopIllegalArgumentException("File name \""
            + child.getLocalName() + "\" is reserved. Please "
            + " change the name of the existing file or directory to another "
            + "name before upgrading to this release.");
      }
      // NOTE: This does not update space counts for parents
      if (!parent.addChild(child)) {
        return;
      }
      dir.cacheName(child);

      if (child.isFile()) {
        updateBlocksMap(child.asFile(), fsn.getBlockManager());
      }
    }

    /**
     * Dispatches on the serialized inode's type to the matching
     * deserializer. Returns null for an unrecognized type.
     */
    private INode loadINode(INodeSection.INode n) {
      switch (n.getType()) {
      case FILE:
        return loadINodeFile(n);
      case DIRECTORY:
        return loadINodeDirectory(n, parent.getLoaderContext());
      case SYMLINK:
        return loadINodeSymlink(n);
      default:
        break;
      }
      return null;
    }

    /**
     * Materializes an {@link INodeFile}, restoring its blocks, permissions,
     * ACLs, xattrs and — if present — its under-construction state (in
     * which case the last block is replaced with an under-construction
     * block).
     */
    private INodeFile loadINodeFile(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.FILE;
      INodeSection.INodeFile f = n.getFile();
      List<BlockProto> bp = f.getBlocksList();
      short replication = (short) f.getReplication();
      LoaderContext state = parent.getLoaderContext();

      BlockInfoContiguous[] blocks = new BlockInfoContiguous[bp.size()];
      for (int i = 0, e = bp.size(); i < e; ++i) {
        blocks[i] = new BlockInfoContiguous(PBHelper.convert(bp.get(i)), replication);
      }
      final PermissionStatus permissions = loadPermission(f.getPermission(),
          parent.getLoaderContext().getStringTable());

      final INodeFile file = new INodeFile(n.getId(),
          n.getName().toByteArray(), permissions, f.getModificationTime(),
          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
          (byte)f.getStoragePolicyID());

      if (f.hasAcl()) {
        int[] entries = AclEntryStatusFormat.toInt(loadAclEntries(
            f.getAcl(), state.getStringTable()));
        file.addAclFeature(new AclFeature(entries));
      }

      if (f.hasXAttrs()) {
        file.addXAttrFeature(new XAttrFeature(
            loadXAttrs(f.getXAttrs(), state.getStringTable())));
      }

      // under-construction information
      if (f.hasFileUC()) {
        INodeSection.FileUnderConstructionFeature uc = f.getFileUC();
        file.toUnderConstruction(uc.getClientName(), uc.getClientMachine());
        if (blocks.length > 0) {
          BlockInfoContiguous lastBlk = file.getLastBlock();
          // replace the last block of file
          file.setBlock(file.numBlocks() - 1, new BlockInfoContiguousUnderConstruction(
              lastBlk, replication));
        }
      }
      return file;
    }


    /**
     * Materializes an {@link INodeSymlink}, restoring its permissions,
     * times and target path.
     */
    private INodeSymlink loadINodeSymlink(INodeSection.INode n) {
      assert n.getType() == INodeSection.INode.Type.SYMLINK;
      INodeSection.INodeSymlink s = n.getSymlink();
      final PermissionStatus permissions = loadPermission(s.getPermission(),
          parent.getLoaderContext().getStringTable());
      INodeSymlink sym = new INodeSymlink(n.getId(), n.getName().toByteArray(),
          permissions, s.getModificationTime(), s.getAccessTime(),
          s.getTarget().toStringUtf8());
      return sym;
    }

    /**
     * Loads the serialized root inode and copies its quotas, modification
     * time, permission status and xattrs onto the pre-existing in-memory
     * root directory (the root inode object itself is not replaced).
     */
    private void loadRootINode(INodeSection.INode p) {
      INodeDirectory root = loadINodeDirectory(p, parent.getLoaderContext());
      final QuotaCounts q = root.getQuotaCounts();
      final long nsQuota = q.getNameSpace();
      final long dsQuota = q.getStorageSpace();
      if (nsQuota != -1 || dsQuota != -1) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(nsQuota, dsQuota);
      }
      final EnumCounters<StorageType> typeQuotas = q.getTypeSpaces();
      if (typeQuotas.anyGreaterOrEqual(0)) {
        dir.rootDir.getDirectoryWithQuotaFeature().setQuota(typeQuotas);
      }
      dir.rootDir.cloneModificationTime(root);
      dir.rootDir.clonePermissionStatus(root);
      // root dir supports having extended attributes according to POSIX
      final XAttrFeature f = root.getXAttrFeature();
      if (f != null) {
        dir.rootDir.addXAttrFeature(f);
      }
    }
  }

  /**
   * Writes the INode-related fsimage sections (INODE, INODE_DIR,
   * FILES_UNDERCONSTRUCTION) during a namespace save, bit-packing
   * permissions/ACLs/xattrs and deduplicating strings through the saver's
   * string map.
   */
  public final static class Saver {
    /**
     * Packs user id, group id and permission short into a single long
     * (inverse of {@link Loader#loadPermission}).
     */
    private static long buildPermissionStatus(INodeAttributes n,
        final SaverContext.DeduplicationMap<String> stringMap) {
      long userId = stringMap.getId(n.getUserName());
      long groupId = stringMap.getId(n.getGroupName());
      return ((userId & USER_GROUP_STRID_MASK) << USER_STRID_OFFSET)
          | ((groupId & USER_GROUP_STRID_MASK) << GROUP_STRID_OFFSET)
          | n.getFsPermissionShort();
    }

    /**
     * Packs each ACL entry of {@code f} into an int (inverse of
     * {@link Loader#loadAclEntries}).
     */
    private static AclFeatureProto.Builder buildAclEntries(AclFeature f,
        final SaverContext.DeduplicationMap<String> map) {
      AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
      for (int pos = 0, e; pos < f.getEntriesSize(); pos++) {
        e = f.getEntryAt(pos);
        int nameId = map.getId(AclEntryStatusFormat.getName(e));
        int v = ((nameId & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
            | (AclEntryStatusFormat.getType(e).ordinal() << ACL_ENTRY_TYPE_OFFSET)
            | (AclEntryStatusFormat.getScope(e).ordinal() << ACL_ENTRY_SCOPE_OFFSET)
            | (AclEntryStatusFormat.getPermission(e).ordinal());
        b.addEntries(v);
      }
      return b;
    }

    /**
     * Packs each xattr of {@code f} into an {@link XAttrCompactProto}
     * (inverse of {@link Loader#loadXAttrs}). The namespace ordinal is
     * split into 2 low bits plus 1 extension bit; at most 8 namespaces fit.
     */
    private static XAttrFeatureProto.Builder buildXAttrs(XAttrFeature f,
        final SaverContext.DeduplicationMap<String> stringMap) {
      XAttrFeatureProto.Builder b = XAttrFeatureProto.newBuilder();
      for (XAttr a : f.getXAttrs()) {
        XAttrCompactProto.Builder xAttrCompactBuilder = XAttrCompactProto.
            newBuilder();
        int nsOrd = a.getNameSpace().ordinal();
        Preconditions.checkArgument(nsOrd < 8, "Too many namespaces.");
        int v = ((nsOrd & XATTR_NAMESPACE_MASK) << XATTR_NAMESPACE_OFFSET)
            | ((stringMap.getId(a.getName()) & XATTR_NAME_MASK) <<
                XATTR_NAME_OFFSET);
        v |= (((nsOrd >> 2) & XATTR_NAMESPACE_EXT_MASK) <<
            XATTR_NAMESPACE_EXT_OFFSET);
        xAttrCompactBuilder.setName(v);
        if (a.getValue() != null) {
          xAttrCompactBuilder.setValue(PBHelper.getByteString(a.getValue()));
        }
        b.addXAttrs(xAttrCompactBuilder.build());
      }

      return b;
    }

    /**
     * Emits one quota entry per storage type that supports quotas and has a
     * non-negative (i.e. set) quota in {@code q}.
     */
    private static QuotaByStorageTypeFeatureProto.Builder
        buildQuotaByStorageTypeEntries(QuotaCounts q) {
      QuotaByStorageTypeFeatureProto.Builder b =
          QuotaByStorageTypeFeatureProto.newBuilder();
      for (StorageType t: StorageType.getTypesSupportingQuota()) {
        if (q.getTypeSpace(t) >= 0) {
          QuotaByStorageTypeEntryProto.Builder eb =
              QuotaByStorageTypeEntryProto.newBuilder().
                  setStorageType(PBHelper.convertStorageType(t)).
                  setQuota(q.getTypeSpace(t));
          b.addQuotas(eb);
        }
      }
      return b;
    }

    /**
     * Serializes the file-specific attributes (times, permission, block
     * size, replication, storage policy, ACLs, xattrs) of {@code file}.
     * Blocks and under-construction state are appended by the caller.
     */
    public static INodeSection.INodeFile.Builder buildINodeFile(
        INodeFileAttributes file, final SaverContext state) {
      INodeSection.INodeFile.Builder b = INodeSection.INodeFile.newBuilder()
          .setAccessTime(file.getAccessTime())
          .setModificationTime(file.getModificationTime())
          .setPermission(buildPermissionStatus(file, state.getStringMap()))
          .setPreferredBlockSize(file.getPreferredBlockSize())
          .setReplication(file.getFileReplication())
          .setStoragePolicyID(file.getLocalStoragePolicyID());

      AclFeature f = file.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = file.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    /**
     * Serializes the directory-specific attributes (modification time,
     * ns/ds quotas, per-type quotas, permission, ACLs, xattrs) of
     * {@code dir}.
     */
    public static INodeSection.INodeDirectory.Builder buildINodeDirectory(
        INodeDirectoryAttributes dir, final SaverContext state) {
      QuotaCounts quota = dir.getQuotaCounts();
      INodeSection.INodeDirectory.Builder b = INodeSection.INodeDirectory
          .newBuilder().setModificationTime(dir.getModificationTime())
          .setNsQuota(quota.getNameSpace())
          .setDsQuota(quota.getStorageSpace())
          .setPermission(buildPermissionStatus(dir, state.getStringMap()));

      if (quota.getTypeSpaces().anyGreaterOrEqual(0)) {
        b.setTypeQuotas(buildQuotaByStorageTypeEntries(quota));
      }

      AclFeature f = dir.getAclFeature();
      if (f != null) {
        b.setAcl(buildAclEntries(f, state.getStringMap()));
      }
      XAttrFeature xAttrFeature = dir.getXAttrFeature();
      if (xAttrFeature != null) {
        b.setXAttrs(buildXAttrs(xAttrFeature, state.getStringMap()));
      }
      return b;
    }

    private final FSNamesystem fsn;
    private final FileSummary.Builder summary;
    private final SaveNamespaceContext context;
    private final FSImageFormatProtobuf.Saver parent;

    Saver(FSImageFormatProtobuf.Saver parent, FileSummary.Builder summary) {
      this.parent = parent;
      this.summary = summary;
      this.context = parent.getContext();
      this.fsn = context.getSourceNamesystem();
    }

    /**
     * Writes the INODE_DIR section: one DirEntry per non-empty directory,
     * listing children by inode id and reference children by their index in
     * the shared reference list. Periodically checks for cancellation.
     */
    void serializeINodeDirectorySection(OutputStream out) throws IOException {
      Iterator<INodeWithAdditionalFields> iter = fsn.getFSDirectory()
          .getINodeMap().getMapIterator();
      final ArrayList<INodeReference> refList = parent.getSaverContext()
          .getRefList();
      int i = 0;
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        if (!n.isDirectory()) {
          continue;
        }

        ReadOnlyList<INode> children = n.asDirectory().getChildrenList(
            Snapshot.CURRENT_STATE_ID);
        if (children.size() > 0) {
          INodeDirectorySection.DirEntry.Builder b = INodeDirectorySection.
              DirEntry.newBuilder().setParent(n.getId());
          for (INode inode : children) {
            if (!inode.isReference()) {
              b.addChildren(inode.getId());
            } else {
              // References are serialized elsewhere; record only the index
              // assigned by appending to the shared reference list.
              refList.add(inode.asReference());
              b.addRefChildren(refList.size() - 1);
            }
          }
          INodeDirectorySection.DirEntry e = b.build();
          e.writeDelimitedTo(out);
        }

        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.INODE_DIR);
    }

    /**
     * Writes the INODE section: a header carrying the last inode id and
     * inode count, followed by every inode in the inode map. Periodically
     * checks for cancellation.
     */
    void serializeINodeSection(OutputStream out) throws IOException {
      INodeMap inodesMap = fsn.dir.getINodeMap();

      INodeSection.Builder b = INodeSection.newBuilder()
          .setLastInodeId(fsn.dir.getLastInodeId()).setNumInodes(inodesMap.size());
      INodeSection s = b.build();
      s.writeDelimitedTo(out);

      int i = 0;
      Iterator<INodeWithAdditionalFields> iter = inodesMap.getMapIterator();
      while (iter.hasNext()) {
        INodeWithAdditionalFields n = iter.next();
        save(out, n);
        ++i;
        if (i % FSImageFormatProtobuf.Saver.CHECK_CANCEL_INTERVAL == 0) {
          context.checkCancelled();
        }
      }
      parent.commitSection(summary, FSImageFormatProtobuf.SectionName.INODE);
    }

    /**
     * Writes the FILES_UNDERCONSTRUCTION section: one (inode id, full path)
     * entry per file currently under construction.
     */
    void serializeFilesUCSection(OutputStream out) throws IOException {
      Map<String, INodeFile> ucMap = fsn.getFilesUnderConstruction();
      for (Map.Entry<String, INodeFile> entry : ucMap.entrySet()) {
        String path = entry.getKey();
        INodeFile file = entry.getValue();
        FileUnderConstructionEntry.Builder b = FileUnderConstructionEntry
            .newBuilder().setInodeId(file.getId()).setFullPath(path);
        FileUnderConstructionEntry e = b.build();
        e.writeDelimitedTo(out);
      }
      parent.commitSection(summary,
          FSImageFormatProtobuf.SectionName.FILES_UNDERCONSTRUCTION);
    }

    /**
     * Dispatches to the type-specific save overload; inodes of unknown type
     * are skipped silently.
     */
    private void save(OutputStream out, INode n) throws IOException {
      if (n.isDirectory()) {
        save(out, n.asDirectory());
      } else if (n.isFile()) {
        save(out, n.asFile());
      } else if (n.isSymlink()) {
        save(out, n.asSymlink());
      }
    }

    /** Serializes one directory inode as a delimited INode message. */
    private void save(OutputStream out, INodeDirectory n) throws IOException {
      INodeSection.INodeDirectory.Builder b = buildINodeDirectory(n,
          parent.getSaverContext());
      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.DIRECTORY).setDirectory(b).build();
      r.writeDelimitedTo(out);
    }

    /**
     * Serializes one file inode as a delimited INode message, including its
     * block list and, when present, its under-construction feature.
     */
    private void save(OutputStream out, INodeFile n) throws IOException {
      INodeSection.INodeFile.Builder b = buildINodeFile(n,
          parent.getSaverContext());

      if (n.getBlocks() != null) {
        for (Block block : n.getBlocks()) {
          b.addBlocks(PBHelper.convert(block));
        }
      }

      FileUnderConstructionFeature uc = n.getFileUnderConstructionFeature();
      if (uc != null) {
        INodeSection.FileUnderConstructionFeature f =
            INodeSection.FileUnderConstructionFeature
                .newBuilder().setClientName(uc.getClientName())
                .setClientMachine(uc.getClientMachine()).build();
        b.setFileUC(f);
      }

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.FILE).setFile(b).build();
      r.writeDelimitedTo(out);
    }

    /** Serializes one symlink inode as a delimited INode message. */
    private void save(OutputStream out, INodeSymlink n) throws IOException {
      SaverContext state = parent.getSaverContext();
      INodeSection.INodeSymlink.Builder b = INodeSection.INodeSymlink
          .newBuilder()
          .setPermission(buildPermissionStatus(n, state.getStringMap()))
          .setTarget(ByteString.copyFrom(n.getSymlink()))
          .setModificationTime(n.getModificationTime())
          .setAccessTime(n.getAccessTime());

      INodeSection.INode r = buildINodeCommon(n)
          .setType(INodeSection.INode.Type.SYMLINK).setSymlink(b).build();
      r.writeDelimitedTo(out);
    }

    /** Fills the fields shared by all inode types: id and local name. */
    private final INodeSection.INode.Builder buildINodeCommon(INode n) {
      return INodeSection.INode.newBuilder()
          .setId(n.getId())
          .setName(ByteString.copyFrom(n.getLocalNameBytes()));
    }
  }

  // Static-only holder class; never instantiated.
  private FSImageFormatPBINode() {
  }
}