Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,9 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.S3SecretManager;
import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.HasCompletedRequestInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
Expand Down Expand Up @@ -409,6 +411,46 @@ private String addToBatch(Queue<Entry> buffer, BatchOperation batchOperation) {
try {
addToBatchWithTrace(omResponse,
() -> response.checkAndUpdateDB(omMetadataManager, batchOperation));

// This is a strawman approach and requires some discussion
// with the community on approach.
//
// At the moment any response type which we want to capture an
// OmCompletedRequestInfo for is required to implement the
// interface HasCompletedRequestInfo and the method
// getCompletedRequestInfo() and we then have this extra step
// here in the double buffer to capture the rows.
//
// It would seem ideal that the double buffer shouldn't have to
// know/care that there is the concept of capturing this
// OmCompletedRequestInfo row for certain response types but the
// approach described above seemed like the least invasive
// approach overall. I am open to other views.
//
// Other approaches I considered:
// - adding a similar getCompletedRequestInfo method to each
// *request* type which we want to capture a row for. The downside
// of this was that since requests are not part of the
// addToBatch flow the OmCompletedRequestInfo instance then had
// to be passed through from the request to the relevant
// response constructors and this created quite a bit of code
// churn which I perceived as messy
//
// * in terms of the actual data capture, rather than this
// "instanceof" approach in this class I toyed with
// having each response type which we want to capture a row for
// capturing it itself in its own implementation of addToDBBatch
// (i.e. no need for any new code in this class). This
// seemed to be a bit messier to me in terms of code duplication
//
if (response instanceof HasCompletedRequestInfo) {
Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@ChenSammi / @errose28 I would be grateful for your feedback on this approach to capturing the data for the new entity class by adding it to the transaction batch here OzoneManagerDoubleBuffer.

I discussed this briefly with @errose28 on the last community call and have made some revisions informed by that discussion although it is not exactly what we discussed. However, I think the comment above explains my thought process (i.e. that it could have been added to the addToDBBatch method of each relevant response implementation rather than have any code footprint in OzoneManagerDoubleBuffer but that approach was messier than I would have liked). I'd be happy to hear alternate takes

NOTE: I'm aware this is lacking unit tests but I'd like to come to some consensus on where the code is going to live first

OmCompletedRequestInfo completedRequestInfo = ((HasCompletedRequestInfo) response).getCompletedRequestInfo(
entry.getTermIndex().getIndex());

omMetadataManager.getCompletedRequestInfoTable().putWithBatch(
batchOperation, completedRequestInfo.getTrxLogIndex(), completedRequestInfo);
}

} catch (IOException ex) {
// During Adding to RocksDB batch entry got an exception.
// We should terminate the OM.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
.setOpenVersion(openVersion).build())
.setCmdType(CreateFile);
omClientResponse = new OMFileCreateResponse(omResponse.build(),
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject());
omKeyInfo, missingParentInfos, clientID, omBucketInfo.copyObject(),
isRecursive, isOverWrite);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
.setCmdType(Type.CreateFile);
omClientResponse = new OMFileCreateResponseWithFSO(omResponse.build(),
omFileInfo, missingParentInfos, clientID,
omBucketInfo.copyObject(), volumeId);
omBucketInfo.copyObject(), volumeId,
isRecursive, isOverWrite);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,8 @@ private OMClientResponse renameKey(OmKeyInfo toKeyParent, String toKeyName,
OMClientResponse omClientResponse = new OMKeyRenameResponseWithFSO(
omResponse.setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
dbFromKey, dbToKey, fromKeyParent, toKeyParent, fromKeyValue,
omBucketInfo, isRenameDirectory, getBucketLayout());
omBucketInfo, isRenameDirectory, getBucketLayout(),
fromKeyName, toKeyName);
return omClientResponse;
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.response;

import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;

/**
 * Marker interface for response classes that can supply an
 * {@link OmCompletedRequestInfo} describing the request they complete.
 * The double buffer checks each response for this interface when adding
 * it to the DB batch and persists the returned record in the
 * completed-request-info table, keyed by transaction log index.
 */
public interface HasCompletedRequestInfo {

  /**
   * Builds the completed-request record for this response.
   *
   * @param trxnLogIndex transaction log index of the completed request;
   *                     also used as the record's key in the table
   * @return the populated {@link OmCompletedRequestInfo}
   */
  OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex);
}
Original file line number Diff line number Diff line change
Expand Up @@ -26,16 +26,19 @@
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.HasCompletedRequestInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for CreateBucket request.
*/
@CleanupTableInfo(cleanupTables = {BUCKET_TABLE, VOLUME_TABLE})
public final class OMBucketCreateResponse extends OMClientResponse {
public final class OMBucketCreateResponse extends OMClientResponse implements HasCompletedRequestInfo {

private final OmBucketInfo omBucketInfo;
private final OmVolumeArgs omVolumeArgs;
Expand Down Expand Up @@ -87,5 +90,16 @@ public OmBucketInfo getOmBucketInfo() {
return omBucketInfo;
}

@Override
public OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // Capture the completed CreateBucket operation for this transaction.
  // NOTE(review): creationTime is the wall-clock time at which this record
  // is built (batch-apply time), not the bucket's creation time — confirm
  // that is the intended semantics.
  final long recordedAtMillis = System.currentTimeMillis();
  final String volume = omBucketInfo.getVolumeName();
  final String bucket = omBucketInfo.getBucketName();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.CreateBucket)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(recordedAtMillis)
      .setVolumeName(volume)
      .setBucketName(bucket)
      // CreateBucket carries no operation-specific arguments.
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,19 @@
import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.HasCompletedRequestInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for DeleteBucket request.
*/
@CleanupTableInfo(cleanupTables = {BUCKET_TABLE, VOLUME_TABLE})
public final class OMBucketDeleteResponse extends OMClientResponse {
public final class OMBucketDeleteResponse extends OMClientResponse implements HasCompletedRequestInfo {

private String volumeName;
private String bucketName;
Expand Down Expand Up @@ -89,5 +92,15 @@ public String getBucketName() {
return bucketName;
}

/**
 * Builds the completed-request record for this DeleteBucket response,
 * keyed by the supplied transaction log index.
 *
 * @param trxnLogIndex transaction log index of the completed request
 * @return populated {@link OmCompletedRequestInfo} for DeleteBucket
 */
@Override
public OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  return OmCompletedRequestInfo.newBuilder()
      .setTrxLogIndex(trxnLogIndex)
      .setCmdType(Type.DeleteBucket)
      // NOTE(review): this is the time the record is built (batch-apply
      // time), not the bucket's original creation time — confirm intent.
      .setCreationTime(System.currentTimeMillis())
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      // DeleteBucket carries no operation-specific arguments.
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}

Original file line number Diff line number Diff line change
Expand Up @@ -26,19 +26,22 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.HasCompletedRequestInfo;
import org.apache.hadoop.ozone.om.response.key.OmKeyResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* Response for create directory request.
*/
@CleanupTableInfo(cleanupTables = {KEY_TABLE})
public class OMDirectoryCreateResponse extends OmKeyResponse {
public class OMDirectoryCreateResponse extends OmKeyResponse implements HasCompletedRequestInfo {

private static final Logger LOG =
LoggerFactory.getLogger(OMDirectoryCreateResponse.class);
Expand Down Expand Up @@ -98,4 +101,17 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
LOG.debug("Directory already exists. addToDBBatch is a no-op");
}
}

/**
 * Builds the completed-request record for this CreateDirectory response,
 * keyed by the supplied transaction log index.
 *
 * @param trxnLogIndex transaction log index of the completed request
 * @return populated {@link OmCompletedRequestInfo} for CreateDirectory
 */
@Override
public OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  return OmCompletedRequestInfo.newBuilder()
      .setTrxLogIndex(trxnLogIndex)
      .setCmdType(Type.CreateDirectory)
      // NOTE(review): this is the time the record is built (batch-apply
      // time), not the directory's creation time — confirm intent.
      .setCreationTime(System.currentTimeMillis())
      .setVolumeName(dirKeyInfo.getVolumeName())
      .setBucketName(dirKeyInfo.getBucketName())
      .setKeyName(dirKeyInfo.getKeyName())
      // CreateDirectory carries no operation-specific arguments.
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -24,23 +24,32 @@
import java.util.List;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for create file request.
*/
@CleanupTableInfo(cleanupTables = {KEY_TABLE, OPEN_KEY_TABLE})
public class OMFileCreateResponse extends OMKeyCreateResponse {

private boolean isRecursive;
private boolean isOverWrite;

/**
 * Creates a CreateFile response, additionally capturing the recursive and
 * overwrite flags so they can be reported in the completed-request record.
 *
 * @param omResponse       the OM response proto
 * @param omKeyInfo        key info for the created file
 * @param parentKeyInfos   parent key infos passed through to the superclass
 * @param openKeySessionID client session id of the open key
 * @param omBucketInfo     bucket info snapshot for this response
 * @param isRecursive      whether the create was requested recursively
 * @param isOverWrite      whether overwrite was requested
 */
public OMFileCreateResponse(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo, @Nonnull List<OmKeyInfo> parentKeyInfos,
    long openKeySessionID,
    @Nonnull OmBucketInfo omBucketInfo,
    boolean isRecursive, boolean isOverWrite) {
  super(omResponse, omKeyInfo, parentKeyInfos, openKeySessionID,
      omBucketInfo);

  // Retained only for building the completed-request operation args.
  this.isRecursive = isRecursive;
  this.isOverWrite = isOverWrite;
}

/**
Expand All @@ -53,4 +62,13 @@ public OMFileCreateResponse(@Nonnull OMResponse omResponse, @Nonnull
checkStatusNotOK();
}

/**
 * @return the command type recorded for this response (CreateFile)
 */
@Override
public Type getOperationType() {
  return Type.CreateFile;
}

/**
 * @return CreateFile-specific arguments (the recursive and overwrite
 *     flags captured at construction) for the completed-request record
 */
@Override
public OmCompletedRequestInfo.OperationArgs getCompletedRequestInfoArgs() {
  return new OmCompletedRequestInfo.OperationArgs.CreateFileArgs(isRecursive, isOverWrite);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,29 @@
BUCKET_TABLE})
public class OMFileCreateResponseWithFSO extends OMFileCreateResponse {

private static final boolean DEFAULT_IS_RECURSIVE = false;
private static final boolean DEFAULT_IS_OVERWRITE = false;

private List<OmDirectoryInfo> parentDirInfos;
private long volumeId;

/**
 * Backwards-compatible constructor; defaults the recursive and overwrite
 * flags to {@code false}.
 *
 * @param omResponse       the OM response proto
 * @param omKeyInfo        key info for the created file
 * @param parentDirInfos   parent directory infos kept by this response
 * @param openKeySessionID client session id of the open key
 * @param omBucketInfo     bucket info snapshot for this response
 * @param volumeId         id of the containing volume
 *                         (NOTE(review): {@code @Nonnull} has no effect on
 *                         a primitive parameter)
 */
public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo,
    @Nonnull List<OmDirectoryInfo> parentDirInfos, long openKeySessionID,
    @Nonnull OmBucketInfo omBucketInfo, @Nonnull long volumeId) {
  this(omResponse, omKeyInfo, parentDirInfos, openKeySessionID,
      omBucketInfo, volumeId, DEFAULT_IS_RECURSIVE,
      DEFAULT_IS_OVERWRITE);
}

/**
 * Full constructor, additionally capturing the recursive and overwrite
 * flags for the completed-request record.
 *
 * @param omResponse       the OM response proto
 * @param omKeyInfo        key info for the created file
 * @param parentDirInfos   parent directory infos kept by this response
 * @param openKeySessionID client session id of the open key
 * @param omBucketInfo     bucket info snapshot for this response
 * @param volumeId         id of the containing volume
 * @param isRecursive      whether the create was requested recursively
 * @param isOverWrite      whether overwrite was requested
 */
@SuppressWarnings("checkstyle:ParameterNumber")
public OMFileCreateResponseWithFSO(@Nonnull OMResponse omResponse,
    @Nonnull OmKeyInfo omKeyInfo,
    @Nonnull List<OmDirectoryInfo> parentDirInfos, long openKeySessionID,
    @Nonnull OmBucketInfo omBucketInfo, long volumeId,
    boolean isRecursive, boolean isOverWrite) {
  // Fix: dropped the @Nonnull that was on volumeId — the annotation is
  // meaningless on a primitive, which can never be null.
  // NOTE(review): the superclass deliberately receives an empty list
  // rather than parentDirInfos; this response stores the directory infos
  // itself — confirm that is intentional.
  super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
      omBucketInfo, isRecursive, isOverWrite);
  this.parentDirInfos = parentDirInfos;
  this.volumeId = volumeId;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,17 +30,20 @@
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.HasCompletedRequestInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;

/**
* Response for CommitKey request.
*/
@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, KEY_TABLE, DELETED_TABLE,
BUCKET_TABLE})
public class OMKeyCommitResponse extends OmKeyResponse {
public class OMKeyCommitResponse extends OmKeyResponse implements HasCompletedRequestInfo {

private OmKeyInfo omKeyInfo;
private String ozoneKeyName;
Expand Down Expand Up @@ -154,4 +157,17 @@ protected boolean isHSync() {
public OmKeyInfo getNewOpenKeyInfo() {
return newOpenKeyInfo;
}

@Override
public OmCompletedRequestInfo getCompletedRequestInfo(long trxnLogIndex) {
  // Record that a CommitKey operation completed at this transaction index.
  // NOTE(review): creationTime is the wall-clock time this record is
  // built (batch-apply time), not the key's creation time — confirm
  // intended semantics.
  final String volume = omKeyInfo.getVolumeName();
  final String bucket = omKeyInfo.getBucketName();
  final String key = omKeyInfo.getKeyName();
  return OmCompletedRequestInfo.newBuilder()
      .setCmdType(Type.CommitKey)
      .setTrxLogIndex(trxnLogIndex)
      .setCreationTime(System.currentTimeMillis())
      .setVolumeName(volume)
      .setBucketName(bucket)
      .setKeyName(key)
      // CommitKey carries no operation-specific arguments.
      .setOpArgs(new OmCompletedRequestInfo.OperationArgs.NoArgs())
      .build();
}
}
Loading