diff --git a/Dockerfile b/Dockerfile
index c9aa6e9fcb40cba228c9a4d2c0634eb055e38fdf..c2c5efa46dc17b7e81fa548e40a6e64cd29abae3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -74,6 +74,7 @@ RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.gi
  && ln -s $(find /opt/otbtf -type d -wholename "*/site-packages/tensorflow/include") /opt/otbtf/include/tf \
  # The only missing header in the wheel
  && cp tensorflow/cc/saved_model/tag_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \
+ && cp tensorflow/cc/saved_model/signature_constants.h /opt/otbtf/include/tf/tensorflow/cc/saved_model/ \
  # Symlink external libs (required for MKL - libiomp5)
  && for f in $(find -L /opt/otbtf/include/tf -wholename "*/external/*/*.so"); do ln -s $f /opt/otbtf/lib/; done \
  # Compress and save TF binaries
@@ -90,7 +91,10 @@ WORKDIR /src/otb
 
 # SuperBuild OTB
 COPY tools/docker/build-flags-otb.txt ./
-RUN git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \
+RUN apt-get update -y \
+ && apt-get install --reinstall ca-certificates -y \
+ && update-ca-certificates \
+ && git clone --single-branch -b $OTB https://gitlab.orfeo-toolbox.org/orfeotoolbox/otb.git \
  && mkdir -p build \
  && cd build \
  # Set GL/Qt build flags
diff --git a/include/otbTensorflowCommon.cxx b/include/otbTensorflowCommon.cxx
index 662c9d3e979c5e67ccf9effc4564c9d9fd5c6d0e..b7a27c60c5ef49fbee42556ace70b54751f682f9 100644
--- a/include/otbTensorflowCommon.cxx
+++ b/include/otbTensorflowCommon.cxx
@@ -11,8 +11,10 @@
 =========================================================================*/
 #include "otbTensorflowCommon.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // Environment variable for the number of sources in "Multisource" applications
@@ -22,21 +24,21 @@ const std::string ENV_VAR_NAME_NSOURCES = "OTB_TF_NSOURCES";
 //
 // Get the environment variable as int
 //
-int GetEnvironmentVariableAsInt(const std::string & variableName)
+int
+GetEnvironmentVariableAsInt(const std::string & variableName)
 {
-  int ret = -1;
-  char const* tmp = getenv( variableName.c_str() );
-  if ( tmp != NULL )
+  int          ret = -1;
+  char const * tmp = getenv(variableName.c_str());
+  if (tmp != NULL)
   {
-    std::string s( tmp );
+    std::string s(tmp);
     try
     {
       ret = std::stoi(s);
     }
-    catch(...)
+    catch (...)
     {
-      itkGenericExceptionMacro("Error parsing variable "
-          << variableName << " as integer. Value is " << s);
+      itkGenericExceptionMacro("Error parsing variable " << variableName << " as integer. Value is " << s);
     }
   }
 
@@ -47,7 +49,8 @@ int GetEnvironmentVariableAsInt(const std::string & variableName)
 // This function returns the numeric content of the ENV_VAR_NAME_NSOURCES
 // environment variable
 //
-int GetNumberOfSources()
+int
+GetNumberOfSources()
 {
   int ret = GetEnvironmentVariableAsInt(ENV_VAR_NAME_NSOURCES);
   if (ret != -1)
@@ -60,15 +63,18 @@ int GetNumberOfSources()
 //
 // This function copy a patch from an input image to an output image
 //
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize)
 {
-  typename TImage::RegionType inputPatchRegion(inputPatchIndex, patchSize);
-  typename TImage::RegionType outputPatchRegion(outputPatchIndex, patchSize);
-  typename itk::ImageRegionConstIterator<TImage> inIt (inputImg, inputPatchRegion);
-  typename itk::ImageRegionIterator<TImage> outIt (outputImg, outputPatchRegion);
+  typename TImage::RegionType                    inputPatchRegion(inputPatchIndex, patchSize);
+  typename TImage::RegionType                    outputPatchRegion(outputPatchIndex, patchSize);
+  typename itk::ImageRegionConstIterator<TImage> inIt(inputImg, inputPatchRegion);
+  typename itk::ImageRegionIterator<TImage>      outIt(outputImg, outputPatchRegion);
   for (inIt.GoToBegin(), outIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++outIt)
   {
     outIt.Set(inIt.Get());
@@ -78,9 +84,9 @@ void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & i
 //
 // Get image infos
 //
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
 {
   nBands = image->GetNumberOfComponentsPerPixel();
   sizex = image->GetLargestPossibleRegion().GetSize(0);
@@ -90,8 +96,9 @@ void GetImageInfo(typename TImage::Pointer image,
 //
 // Propagate the requested region in the image
 //
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
 {
   image->SetRequestedRegion(region);
   image->PropagateRequestedRegion();
@@ -101,13 +108,16 @@ void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::R
 //
 // Sample an input image at the specified location
 //
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize)
 {
   typename TImage::IndexType index, outIndex;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     outIndex[0] = 0;
@@ -128,7 +138,6 @@ bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer
     }
   }
   return false;
-
 }
 
 } // end namespace tf
diff --git a/include/otbTensorflowCommon.h b/include/otbTensorflowCommon.h
index fbd7281035185c2acc3a56dac3850a23d76280df..a012173c66ec9c4a10ae0f5f7df9908f01dd4833 100644
--- a/include/otbTensorflowCommon.h
+++ b/include/otbTensorflowCommon.h
@@ -22,38 +22,49 @@
 #include "itkImageRegionConstIterator.h"
 #include "itkImageRegionIterator.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Environment variable for the number of sources in "Multisource" applications
 extern const std::string ENV_VAR_NAME_NSOURCES;
 
 // Get the environment variable as int
-int GetEnvironmentVariableAsInt(const std::string & variableName);
+int
+GetEnvironmentVariableAsInt(const std::string & variableName);
 
 // Get the value (as int) of the environment variable ENV_VAR_NAME_NSOURCES
-int GetNumberOfSources();
+int
+GetNumberOfSources();
 
 // This function copy a patch from an input image to an output image
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize);
 
 // Get image infos
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
 
 // Propagate the requested region in the image
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
 
 // Sample an input image at the specified location
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowCopyUtils.cxx b/include/otbTensorflowCopyUtils.cxx
index e969051102c6babaaa2aa3433dd25bafb64af18e..3aa117691c96993574afc27a085cf64a0140524e 100644
--- a/include/otbTensorflowCopyUtils.cxx
+++ b/include/otbTensorflowCopyUtils.cxx
@@ -187,7 +187,13 @@ GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto)
     return 1;
   // any other dimension: we assume that the last dimension represent the
   // number of channels in the output image.
-  return proto.dim(nDims - 1).size();
+  tensorflow::int64 nbChannels = proto.dim(nDims - 1).size();
+  if (nbChannels < 1)
+    itkGenericExceptionMacro("Cannot determine the size of the last dimension of one output tensor. Dimension index is "
+                             << (nDims - 1)
+                             << ". Please rewrite your model with output tensors having a shape where the last "
+                                "dimension is a constant value.");
+  return nbChannels;
 }
 
 //
@@ -218,9 +224,10 @@ CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
   const tensorflow::int64 nElmI = bufferRegion.GetNumberOfPixels() * outputDimSize_C;
   if (nElmI != nElmT)
   {
-    itkGenericExceptionMacro("Number of elements in the tensor is " << nElmT 
-                             << " but image outputRegion has " << nElmI << " values to fill.\n"
-                             << "Buffer region is: \n" << bufferRegion << "\n"
+    itkGenericExceptionMacro("Number of elements in the tensor is "
+                             << nElmT << " but image outputRegion has " << nElmI << " values to fill.\n"
+                             << "Buffer region is: \n"
+                             << bufferRegion << "\n"
                              << "Number of components in the output image: " << outputDimSize_C << "\n"
                              << "Tensor shape: " << PrintTensorShape(tensor.shape()) << "\n"
                              << "Please check the input(s) field of view (FOV), "
@@ -347,7 +354,7 @@ ValueToTensor(std::string value)
   }
 
   // Create tensor
-  tensorflow::TensorShape shape({values.size()});
+  tensorflow::TensorShape shape({ values.size() });
   tensorflow::Tensor      out(tensorflow::DT_BOOL, shape);
   if (is_digit)
   {
@@ -409,7 +416,7 @@ ValueToTensor(std::string value)
     }
     idx++;
   }
-  otbLogMacro(Debug,  << "Returning tensor: "<< out.DebugString());
+  otbLogMacro(Debug, << "Returning tensor: " << out.DebugString());
 
   return out;
 }
diff --git a/include/otbTensorflowCopyUtils.h b/include/otbTensorflowCopyUtils.h
index 174587913beb5a73f78cbffe614aef23c78c8147..59e1a7443ff78511b42d6d67c74023cd49864235 100644
--- a/include/otbTensorflowCopyUtils.h
+++ b/include/otbTensorflowCopyUtils.h
@@ -34,57 +34,94 @@
 #include <string>
 #include <regex>
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Generate a string with TensorShape infos
-std::string PrintTensorShape(const tensorflow::TensorShape & shp);
+std::string
+PrintTensorShape(const tensorflow::TensorShape & shp);
 
 // Generate a string with tensor infos
-std::string PrintTensorInfos(const tensorflow::Tensor & tensor);
+std::string
+PrintTensorInfos(const tensorflow::Tensor & tensor);
 
 // Create a tensor with the good datatype
-template<class TImage>
-tensorflow::Tensor CreateTensor(tensorflow::TensorShape & shape);
+template <class TImage>
+tensorflow::Tensor
+CreateTensor(tensorflow::TensorShape & shape);
 
 // Populate a tensor with the buffered region of a vector image
-template<class TImage>
-void PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
+template <class TImage>
+void
+PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
 
 // Populate the buffered region of a vector image with a given tensor's values
-template<class TImage>
-void TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
+template <class TImage>
+void
+TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor ({-1, sz_y, sz_x, sz_bands})
-template<class TImage, class TValueType=typename TImage::InternalPixelType>
-void RecopyImageRegionToTensor(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage, class TValueType = typename TImage::InternalPixelType>
+void
+RecopyImageRegionToTensor(const typename TImage::Pointer      inputPtr,
+                          const typename TImage::RegionType & region,
+                          tensorflow::Tensor &                tensor,
+                          unsigned int                        elemIdx);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor (TValueType-agnostic function)
-template<class TImage>
-void RecopyImageRegionToTensorWithCast(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+RecopyImageRegionToTensorWithCast(const typename TImage::Pointer      inputPtr,
+                                  const typename TImage::RegionType & region,
+                                  tensorflow::Tensor &                tensor,
+                                  unsigned int                        elemIdx);
 
 // Sample a centered patch
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::IndexType & centerIndex, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::PointType & centerCoord, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::IndexType & centerIndex,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::PointType & centerCoord,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
 
 // Return the number of channels from the TensorflowShapeProto
-tensorflow::int64 GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
+tensorflow::int64
+GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
 
 // Copy a tensor into the image region
-template<class TImage, class TValueType>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, typename TImage::Pointer outputPtr, const typename TImage::RegionType & region, int & channelOffset);
+template <class TImage, class TValueType>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & region,
+                        int &                               channelOffset);
 
 // Copy a tensor into the image region (TValueType-agnostic version)
-template<class TImage>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, const typename TImage::RegionType & bufferRegion, typename TImage::Pointer outputPtr, const typename TImage::RegionType & outputRegion, int & channelOffset);
+template <class TImage>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        const typename TImage::RegionType & bufferRegion,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & outputRegion,
+                        int &                               channelOffset);
 
 // Convert a value into a tensor
-tensorflow::Tensor ValueToTensor(std::string value);
+tensorflow::Tensor
+ValueToTensor(std::string value);
 
 // Convert an expression into a dict
-std::pair<std::string, tensorflow::Tensor> ExpressionToTensor(std::string expression);
+std::pair<std::string, tensorflow::Tensor>
+ExpressionToTensor(std::string expression);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowDataTypeBridge.cxx b/include/otbTensorflowDataTypeBridge.cxx
index a510cb4ea5ecab0c1505e690d79922b2299ddc0d..71fcd8c6beca73f611aa919b237c9d719f6ec4a7 100644
--- a/include/otbTensorflowDataTypeBridge.cxx
+++ b/include/otbTensorflowDataTypeBridge.cxx
@@ -11,14 +11,17 @@
 =========================================================================*/
 #include "otbTensorflowDataTypeBridge.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // returns the datatype used by tensorflow
 //
-template<class Type>
-tensorflow::DataType GetTensorflowDataType()
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType()
 {
   if (typeid(Type) == typeid(bool))
   {
@@ -74,8 +77,9 @@ tensorflow::DataType GetTensorflowDataType()
 //
 // Return true if the tensor data type is correct
 //
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor)
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor)
 {
   return GetTensorflowDataType<Type>() == tensor.dtype();
 }
@@ -83,7 +87,8 @@ bool HasSameDataType(const tensorflow::Tensor & tensor)
 //
 // Return the datatype as string
 //
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt)
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt)
 {
   return tensorflow::DataTypeString(dt);
 }
diff --git a/include/otbTensorflowDataTypeBridge.h b/include/otbTensorflowDataTypeBridge.h
index af6be18d335761b7261e6b8c7288cf9b07122bc8..e815dafcba8bbb408a843e3ed63aa9a5a8b8dfe3 100644
--- a/include/otbTensorflowDataTypeBridge.h
+++ b/include/otbTensorflowDataTypeBridge.h
@@ -16,19 +16,24 @@
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/framework/tensor.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // returns the datatype used by tensorflow
-template<class Type>
-tensorflow::DataType GetTensorflowDataType();
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType();
 
 // Return true if the tensor data type is correct
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor);
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor);
 
 // Return datatype as string
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt);
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowGraphOperations.cxx b/include/otbTensorflowGraphOperations.cxx
index d40c4da6a2f49a86cb28069094b4ab9f0cc5b231..b8e0920e0b04d6cb1dc30f5bf3e8fcbe30d29b1b 100644
--- a/include/otbTensorflowGraphOperations.cxx
+++ b/include/otbTensorflowGraphOperations.cxx
@@ -87,19 +87,39 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
                     std::vector<std::string> &                                           tensorsNames,
                     std::vector<std::string> &                                           layerNames,
                     std::vector<tensorflow::TensorShapeProto> &                          shapes,
-                    std::vector<tensorflow::DataType> &                                  dataTypes)
+                    std::vector<tensorflow::DataType> &                                  dataTypes,
+                    std::vector<std::string>                                             blackList = {})
 {
-  // Allocation
+  // Clear shapes, datatypes, and layer names
   shapes.clear();
   dataTypes.clear();
   layerNames.clear();
 
   // Debug infos
-  otbLogMacro(Debug, << "Nodes contained in the model: ");
+  otbLogMacro(Debug, << "Nodes contained in the model:");
   for (auto const & layer : layers)
     otbLogMacro(Debug, << "\t" << layer.first);
 
-  // When the user doesn't specify output.names, m_OutputTensors defaults to an empty list that we can not iterate over.
+  // Sort nodes names alphabetically
+  std::size_t                              k = 0;             // Keep track of the indices in the protobuf map
+  std::vector<std::pair<std::string, int>> sortedLayersNames; // vector of (name, index) pairs
+  for (auto const & layer : layers)
+  {
+    // We exclude any names that appear in the blacklist. This is useful to avoid
+    // confusion between user placeholders (aka constants) and input tensors.
+    // The blacklist is not used for output tensors.
+    if (std::count(blackList.begin(), blackList.end(), layer.first) == 0)
+    {
+      sortedLayersNames.emplace_back(layer.first, k);
+    }
+    k++;
+  }
+  std::sort(sortedLayersNames.begin(), sortedLayersNames.end());
+  otbLogMacro(Debug, << "Alphabetically sorted node names:");
+  for (auto const & name : sortedLayersNames)
+    otbLogMacro(Debug, << "\t" << name.first << " (index: " << name.second << ")");
+
+  // When the user doesn't specify output.names, tensorsNames defaults to an empty list that we cannot iterate over.
   // We change it to a list containing an empty string [""]
   if (tensorsNames.size() == 0)
   {
@@ -108,8 +128,8 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
   }
 
   // Next, we fill layerNames
-  int k = 0; // counter used for tensorsNames
-  for (auto const & name: tensorsNames)
+  k = 0; // counter used for tensorsNames
+  for (auto const & name : tensorsNames)
   {
     bool                   found = false;
     tensorflow::TensorInfo tensor_info;
@@ -118,18 +138,24 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
     if (name.size() == 0)
     {
       found = true;
-      // select the k-th element of `layers`
-      auto it = layers.begin();
-      std::advance(it, k);
+      // select the k-th name of `layers`, alphabetically sorted
+      const std::string kthName = sortedLayersNames[k].first;
+      auto              it = layers.begin();
+      const int         kthIndex = sortedLayersNames[k].second;
+      std::advance(it, kthIndex);
       layerNames.push_back(it->second.name());
       tensor_info = it->second;
-      otbLogMacro(Debug, << "Input " << k << " corresponds to " << it->first << " in the model");
+      if (sortedLayersNames.size() > 1)
+        otbLogMacro(Warning,
+                    << "The provided tensor name is empty, and there are multiple available candidates in the graph. "
+                       "Available tensor names from the graph have been sorted alphabetically, and the tensor #"
+                    << kthIndex << " (aka \"" << it->first << "\") will be used.");
     }
 
     // Else, if the user specified the placeholdername, find the corresponding layer inside the model
     else
     {
-      otbLogMacro(Debug, << "Searching for corresponding node of: " << name << "... ");
+      otbLogMacro(Debug, << "Searching for corresponding node of \"" << name << "\"... ");
       for (auto const & layer : layers)
       {
         // layer is a pair (name, tensor_info)
@@ -143,7 +169,7 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
           otbLogMacro(Debug, << "Found: " << layer.second.name() << " in the model");
         }
       } // next layer
-    } // end else
+    }   // end else
 
     k += 1;
 
@@ -178,7 +204,7 @@ PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::s
     tensorflow::NodeDef node = graph.node(i);
     std::cout << i << "\t" << node.name() << std::endl;
 
-    for (auto const & name: nodesNames)
+    for (auto const & name : nodesNames)
     {
       if (node.name().compare(name) == 0)
       {
diff --git a/include/otbTensorflowGraphOperations.h b/include/otbTensorflowGraphOperations.h
index 6ad4a4e29880e10a5f10cbc4f1e945db9ca3c6a6..dbfcaa8e57efe508850f0ef77e108ba7478c0538 100644
--- a/include/otbTensorflowGraphOperations.h
+++ b/include/otbTensorflowGraphOperations.h
@@ -27,27 +27,37 @@
 // OTB log
 #include "otbMacro.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Load SavedModel variables
-void RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Save SavedModel variables
-void SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Load SavedModel
-void LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
+void
+LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
 
 // Get the following attributes of the specified tensors (by name) of a graph:
 // - shape
 // - datatype
 // Here we assume that the node's output is a tensor
-void GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers, std::vector<std::string> & tensorsNames,
-    std::vector<tensorflow::TensorShapeProto> & shapes, std::vector<tensorflow::DataType> & dataTypes);
+void
+GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers,
+                    std::vector<std::string> &                                           tensorsNames,
+                    std::vector<tensorflow::TensorShapeProto> &                          shapes,
+                    std::vector<tensorflow::DataType> &                                  dataTypes,
+                    std::vector<std::string>                                             blackList);
 
 // Print a lot of stuff about the specified nodes of the graph
-void PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
+void
+PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelBase.h b/include/otbTensorflowMultisourceModelBase.h
index d10648ea00afc6fc624fc9ffac8e19bbdffdf4f2..6c943d1f1e777f8f7d26fc6be7f529b34535b5c7 100644
--- a/include/otbTensorflowMultisourceModelBase.h
+++ b/include/otbTensorflowMultisourceModelBase.h
@@ -65,34 +65,32 @@ namespace otb
  *
  * \ingroup OTBTensorflow
  */
-template <class TInputImage, class TOutputImage=TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelBase :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+template <class TInputImage, class TOutputImage = TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelBase : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelBase             Self;
+  typedef TensorflowMultisourceModelBase                     Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
+  typedef itk::SmartPointer<Self>                            Pointer;
+  typedef itk::SmartPointer<const Self>                      ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelBase, itk::ImageToImageFilter);
 
   /** Images typedefs */
-  typedef TInputImage                                ImageType;
-  typedef typename TInputImage::Pointer              ImagePointerType;
-  typedef typename TInputImage::PixelType            PixelType;
-  typedef typename TInputImage::InternalPixelType    InternalPixelType;
-  typedef typename TInputImage::IndexType            IndexType;
-  typedef typename TInputImage::IndexValueType       IndexValueType;
-  typedef typename TInputImage::PointType            PointType;
-  typedef typename TInputImage::SizeType             SizeType;
-  typedef typename TInputImage::SizeValueType        SizeValueType;
-  typedef typename TInputImage::SpacingType          SpacingType;
-  typedef typename TInputImage::RegionType           RegionType;
+  typedef TInputImage                             ImageType;
+  typedef typename TInputImage::Pointer           ImagePointerType;
+  typedef typename TInputImage::PixelType         PixelType;
+  typedef typename TInputImage::InternalPixelType InternalPixelType;
+  typedef typename TInputImage::IndexType         IndexType;
+  typedef typename TInputImage::IndexValueType    IndexValueType;
+  typedef typename TInputImage::PointType         PointType;
+  typedef typename TInputImage::SizeType          SizeType;
+  typedef typename TInputImage::SizeValueType     SizeValueType;
+  typedef typename TInputImage::SpacingType       SpacingType;
+  typedef typename TInputImage::RegionType        RegionType;
 
   /** Typedefs for parameters */
   typedef std::pair<std::string, tensorflow::Tensor> DictElementType;
@@ -104,15 +102,26 @@ public:
   typedef std::vector<tensorflow::Tensor>            TensorListType;
 
   /** Set and Get the Tensorflow session and graph */
-  void SetSavedModel(tensorflow::SavedModelBundle * saved_model) {m_SavedModel = saved_model;}
-  tensorflow::SavedModelBundle * GetSavedModel() {return m_SavedModel;}
+  void
+  SetSavedModel(tensorflow::SavedModelBundle * saved_model)
+  {
+    m_SavedModel = saved_model;
+  }
+  tensorflow::SavedModelBundle *
+  GetSavedModel()
+  {
+    return m_SavedModel;
+  }
 
   /** Get the SignatureDef */
-  tensorflow::SignatureDef GetSignatureDef();
+  tensorflow::SignatureDef
+  GetSignatureDef();
 
   /** Model parameters */
-  void PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
-  void PushBackOuputTensorBundle(std::string name, SizeType expressionField);
+  void
+  PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
+  void
+  PushBackOuputTensorBundle(std::string name, SizeType expressionField);
 
   /** Input placeholders names */
   itkSetMacro(InputPlaceholders, StringList);
@@ -131,8 +140,16 @@ public:
   itkGetMacro(OutputExpressionFields, SizeListType);
 
   /** User placeholders */
-  void SetUserPlaceholders(const DictType & dict) {m_UserPlaceholders = dict;}
-  DictType GetUserPlaceholders() {return m_UserPlaceholders;}
+  void
+  SetUserPlaceholders(const DictType & dict)
+  {
+    m_UserPlaceholders = dict;
+  }
+  DictType
+  GetUserPlaceholders()
+  {
+    return m_UserPlaceholders;
+  }
 
   /** Target nodes names */
   itkSetMacro(TargetNodesNames, StringList);
@@ -144,40 +161,47 @@ public:
   itkGetMacro(InputTensorsShapes, TensorShapeProtoList);
   itkGetMacro(OutputTensorsShapes, TensorShapeProtoList);
 
-  virtual void GenerateOutputInformation();
+  virtual void
+  GenerateOutputInformation();
 
 protected:
   TensorflowMultisourceModelBase();
-  virtual ~TensorflowMultisourceModelBase() {};
+  virtual ~TensorflowMultisourceModelBase(){};
 
-  virtual std::stringstream GenerateDebugReport(DictType & inputs);
+  virtual std::stringstream
+  GenerateDebugReport(DictType & inputs);
 
-  virtual void RunSession(DictType & inputs, TensorListType & outputs);
+  virtual void
+  RunSession(DictType & inputs, TensorListType & outputs);
 
 private:
-  TensorflowMultisourceModelBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
   // Tensorflow graph and session
-  tensorflow::SavedModelBundle * m_SavedModel;          // The TensorFlow model
+  tensorflow::SavedModelBundle * m_SavedModel; // The TensorFlow model
 
   // Model parameters
-  StringList                 m_InputPlaceholders;       // Input placeholders names
-  SizeListType               m_InputReceptiveFields;    // Input receptive fields
-  StringList                 m_OutputTensors;           // Output tensors names
-  SizeListType               m_OutputExpressionFields;  // Output expression fields
-  DictType                   m_UserPlaceholders;        // User placeholders
-  StringList                 m_TargetNodesNames;        // User nodes target
+  StringList   m_InputPlaceholders;      // Input placeholders names
+  SizeListType m_InputReceptiveFields;   // Input receptive fields
+  StringList   m_OutputTensors;          // Output tensors names
+  SizeListType m_OutputExpressionFields; // Output expression fields
+  DictType     m_UserPlaceholders;       // User placeholders
+  StringList   m_TargetNodesNames;       // User nodes target
 
   // Internal, read-only
-  DataTypeListType           m_InputTensorsDataTypes;   // Input tensors datatype
-  DataTypeListType           m_OutputTensorsDataTypes;  // Output tensors datatype
-  TensorShapeProtoList       m_InputTensorsShapes;      // Input tensors shapes
-  TensorShapeProtoList       m_OutputTensorsShapes;     // Output tensors shapes
+  DataTypeListType     m_InputConstantsDataTypes; // Input constants datatype
+  DataTypeListType     m_InputTensorsDataTypes;   // Input tensors datatype
+  DataTypeListType     m_OutputTensorsDataTypes;  // Output tensors datatype
+  TensorShapeProtoList m_InputConstantsShapes;    // Input constants shapes
+  TensorShapeProtoList m_InputTensorsShapes;      // Input tensors shapes
+  TensorShapeProtoList m_OutputTensorsShapes;     // Output tensors shapes
 
   // Layer names inside the model corresponding to inputs and outputs
-  StringList m_InputLayers;                             // List of input names, as contained in the model
-  StringList m_OutputLayers;                            // List of output names, as contained in the model
+  StringList m_InputConstants; // List of constant names, as contained in the model
+  StringList m_InputLayers;    // List of input names, as contained in the model
+  StringList m_OutputLayers;   // List of output names, as contained in the model
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelBase.hxx b/include/otbTensorflowMultisourceModelBase.hxx
index 752b7c9d61a861d260dc4dfac89efb66e772a42b..07111b0fffc938da8ba3a47c19431b7179040fcd 100644
--- a/include/otbTensorflowMultisourceModelBase.hxx
+++ b/include/otbTensorflowMultisourceModelBase.hxx
@@ -18,28 +18,26 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::TensorflowMultisourceModelBase()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::TensorflowMultisourceModelBase()
 {
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
-  
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+
   m_SavedModel = NULL;
 }
 
 template <class TInputImage, class TOutputImage>
 tensorflow::SignatureDef
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GetSignatureDef()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GetSignatureDef()
 {
-  auto signatures = this->GetSavedModel()->GetSignatures();
+  auto                     signatures = this->GetSavedModel()->GetSignatures();
   tensorflow::SignatureDef signature_def;
 
   if (signatures.size() == 0)
   {
-    itkExceptionMacro("There are no available signatures for this tag-set. \n" <<
-                      "Please check which tag-set to use by running "<<
-                      "`saved_model_cli show --dir your_model_dir --all`");
+    itkExceptionMacro("There are no available signatures for this tag-set. \n"
+                      << "Please check which tag-set to use by running "
+                      << "`saved_model_cli show --dir your_model_dir --all`");
   }
 
   // If serving_default key exists (which is the default for TF saved model), choose it as signature
@@ -57,8 +55,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::PushBackInputTensorBundle(std::string placeholder, SizeType receptiveField, ImagePointerType image)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::PushBackInputTensorBundle(std::string      placeholder,
+                                                                                     SizeType         receptiveField,
+                                                                                     ImagePointerType image)
 {
   Superclass::PushBackInput(image);
   m_InputReceptiveFields.push_back(receptiveField);
@@ -67,8 +66,7 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 std::stringstream
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateDebugReport(DictType & inputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateDebugReport(DictType & inputs)
 {
   // Create a debug report
   std::stringstream debugReport;
@@ -79,18 +77,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   debugReport << "Output image buffered region: " << outputReqRegion << "\n";
 
   // Describe inputs
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
   {
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
-    const RegionType reqRegion = inputPtr->GetRequestedRegion();
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
+    const RegionType       reqRegion = inputPtr->GetRequestedRegion();
     debugReport << "Input #" << i << ":\n";
     debugReport << "Requested region: " << reqRegion << "\n";
     debugReport << "Tensor \"" << inputs[i].first << "\": " << tf::PrintTensorInfos(inputs[i].second) << "\n";
   }
 
   // Show user placeholders
-  debugReport << "User placeholders:\n" ;
-  for (auto& dict: this->GetUserPlaceholders())
+  debugReport << "User placeholders:\n";
+  for (auto & dict : this->GetUserPlaceholders())
   {
     debugReport << "Tensor \"" << dict.first << "\": " << tf::PrintTensorInfos(dict.second) << "\n" << std::endl;
   }
@@ -101,25 +99,31 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::RunSession(DictType & inputs, TensorListType & outputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::RunSession(DictType & inputs, TensorListType & outputs)
 {
 
-  // Add the user's placeholders
-  std::copy(this->GetUserPlaceholders().begin(), this->GetUserPlaceholders().end(), std::back_inserter(inputs));
-
   // Run the TF session here
   // The session will initialize the outputs
 
-  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling TensorFlowModelServe
-  // we must adapt it to `inputs_new`, that corresponds to a mapping {layerName, tensor}, with the layerName being from the model
+  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling
+  // TensorFlowModelServe. We must adapt it to `inputs_new`, which corresponds to a mapping {layerName, tensor}, with
+  // the layerName coming from the model.
   DictType inputs_new;
-  int k = 0;
-  for (auto& dict: inputs)
+
+  // Add the user's placeholders
+  std::size_t k = 0;
+  for (auto & dict : this->GetUserPlaceholders())
+  {
+    inputs_new.emplace_back(m_InputConstants[k], dict.second);
+    k++;
+  }
+
+  // Add input tensors
+  k = 0;
+  for (auto & dict : inputs)
   {
-    DictElementType element = {m_InputLayers[k], dict.second};
-    inputs_new.push_back(element);
-    k+=1;
+    inputs_new.emplace_back(m_InputLayers[k], dict.second);
+    k += 1;
   }
 
   // Run the session, evaluating our output tensors from the graph
@@ -132,16 +136,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
     std::stringstream debugReport = GenerateDebugReport(inputs);
 
     // Throw an exception with the report
-    itkExceptionMacro("Can't run the tensorflow session !\n" <<
-                      "Tensorflow error message:\n" << status.ToString() << "\n"
-                      "OTB Filter debug message:\n" << debugReport.str() );
+    itkExceptionMacro("Can't run the TensorFlow session!\n"
+                      << "Tensorflow error message:\n"
+                      << status.ToString()
+                      << "\n"
+                         "OTB Filter debug message:\n"
+                      << debugReport.str());
   }
 }
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateOutputInformation()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateOutputInformation()
 {
 
   // Check that the number of the following is the same
@@ -151,9 +157,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   if (nbInputs != m_InputReceptiveFields.size() || nbInputs != m_InputPlaceholders.size())
   {
-    itkExceptionMacro("Number of input images is " << nbInputs <<
-                      " but the number of input patches size is " << m_InputReceptiveFields.size() <<
-                      " and the number of input tensors names is " << m_InputPlaceholders.size());
+    itkExceptionMacro("Number of input images is "
+                      << nbInputs << " but the number of input patch sizes is " << m_InputReceptiveFields.size()
+                      << " and the number of input tensor names is " << m_InputPlaceholders.size());
   }
 
   //////////////////////////////////////////////////////////////////////////////////////////
@@ -166,10 +172,21 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   // and other infos (shapes, dtypes)
   // For example, for output names specified by the user m_OutputTensors = ['s2t', 's2t_pad'],
   // this will return m_OutputLayers = ['PartitionedCall:0', 'PartitionedCall:1']
-  // In case the user hasn't named the output, e.g.  m_OutputTensors = [''],
+  // In case the user hasn't named the output, i.e.  m_OutputTensors = [''],
   // this will return the first output m_OutputLayers = ['PartitionedCall:0']
-  tf::GetTensorAttributes(signaturedef.inputs(), m_InputPlaceholders, m_InputLayers, m_InputTensorsShapes, m_InputTensorsDataTypes);
-  tf::GetTensorAttributes(signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
+  StringList constantsNames;
+  std::transform(m_UserPlaceholders.begin(), m_UserPlaceholders.end(), std::back_inserter(constantsNames),
+                 [](const DictElementType & p) { return p.first; });
+  tf::GetTensorAttributes(
+    signaturedef.inputs(), constantsNames, m_InputConstants, m_InputConstantsShapes, m_InputConstantsDataTypes);
+  tf::GetTensorAttributes(signaturedef.inputs(),
+                          m_InputPlaceholders,
+                          m_InputLayers,
+                          m_InputTensorsShapes,
+                          m_InputTensorsDataTypes,
+                          constantsNames);
+  tf::GetTensorAttributes(
+    signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
 }
 
 
diff --git a/include/otbTensorflowMultisourceModelFilter.h b/include/otbTensorflowMultisourceModelFilter.h
index 36d781dd1519b964adce741a8adbcb1385b0c729..bdf9a02d0b00e9dbc228f3a0401ac8dab4c49e32 100644
--- a/include/otbTensorflowMultisourceModelFilter.h
+++ b/include/otbTensorflowMultisourceModelFilter.h
@@ -80,12 +80,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowMultisourceModelFilter :
-public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowMultisourceModelFilter : public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelFilter                          Self;
   typedef TensorflowMultisourceModelBase<TInputImage, TOutputImage> Superclass;
@@ -99,16 +97,16 @@ public:
   itkTypeMacro(TensorflowMultisourceModelFilter, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType           ImageType;
-  typedef typename Superclass::ImagePointerType    ImagePointerType;
-  typedef typename Superclass::PixelType           PixelType;
-  typedef typename Superclass::IndexType           IndexType;
-  typedef typename IndexType::IndexValueType       IndexValueType;
-  typedef typename Superclass::PointType           PointType;
-  typedef typename Superclass::SizeType            SizeType;
-  typedef typename SizeType::SizeValueType         SizeValueType;
-  typedef typename Superclass::SpacingType         SpacingType;
-  typedef typename Superclass::RegionType          RegionType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::PixelType        PixelType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef typename IndexType::IndexValueType    IndexValueType;
+  typedef typename Superclass::PointType        PointType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename SizeType::SizeValueType      SizeValueType;
+  typedef typename Superclass::SpacingType      SpacingType;
+  typedef typename Superclass::RegionType       RegionType;
 
   typedef TOutputImage                             OutputImageType;
   typedef typename TOutputImage::PixelType         OutputPixelType;
@@ -119,12 +117,12 @@ public:
   typedef typename itk::ImageRegionConstIterator<TInputImage>              InputConstIteratorType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictElementType     DictElementType;
-  typedef typename Superclass::DictType            DictType;
-  typedef typename Superclass::StringList          StringList;
-  typedef typename Superclass::SizeListType        SizeListType;
-  typedef typename Superclass::TensorListType      TensorListType;
-  typedef std::vector<float>                       ScaleListType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
+  typedef std::vector<float>                   ScaleListType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
@@ -137,34 +135,43 @@ public:
 
 protected:
   TensorflowMultisourceModelFilter();
-  virtual ~TensorflowMultisourceModelFilter() {};
+  virtual ~TensorflowMultisourceModelFilter(){};
 
-  virtual void SmartPad(RegionType& region, const SizeType &patchSize);
-  virtual void SmartShrink(RegionType& region, const SizeType &patchSize);
-  virtual void ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize);
-  virtual bool OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage);
-  virtual void EnlargeToAlignedRegion(RegionType& region);
+  virtual void
+  SmartPad(RegionType & region, const SizeType & patchSize);
+  virtual void
+  SmartShrink(RegionType & region, const SizeType & patchSize);
+  virtual void
+  ImageToExtent(ImageType * image, PointType & extentInf, PointType & extentSup, SizeType & patchSize);
+  virtual bool
+  OutputRegionToInputRegion(const RegionType & outputRegion, RegionType & inputRegion, ImageType *& inputImage);
+  virtual void
+  EnlargeToAlignedRegion(RegionType & region);
 
-  virtual void GenerateOutputInformation(void);
+  virtual void
+  GenerateOutputInformation(void);
 
-  virtual void GenerateInputRequestedRegion(void);
+  virtual void
+  GenerateInputRequestedRegion(void);
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowMultisourceModelFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
-  bool                       m_ForceOutputGridSize;  // Force output grid size
-  bool                       m_FullyConvolutional;   // Convolution mode
-  float                      m_OutputSpacingScale;   // scaling of the output spacings
+  SizeType m_OutputGridSize;      // Output grid size
+  bool     m_ForceOutputGridSize; // Force output grid size
+  bool     m_FullyConvolutional;  // Convolution mode
+  float    m_OutputSpacingScale;  // scaling of the output spacings
 
   // Internal
-  SpacingType                m_OutputSpacing;     // Output image spacing
-  PointType                  m_OutputOrigin;      // Output image origin
-  SizeType                   m_OutputSize;        // Output image size
-  PixelType                  m_NullPixel;         // Pixel filled with zeros
+  SpacingType m_OutputSpacing; // Output image spacing
+  PointType   m_OutputOrigin;  // Output image origin
+  SizeType    m_OutputSize;    // Output image size
+  PixelType   m_NullPixel;     // Pixel filled with zeros
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelFilter.hxx b/include/otbTensorflowMultisourceModelFilter.hxx
index d208f01a8deb1834b98f64c1230423d44b2cfe7a..3cbb53d92857466d617e5547940c8e42a0ce971e 100644
--- a/include/otbTensorflowMultisourceModelFilter.hxx
+++ b/include/otbTensorflowMultisourceModelFilter.hxx
@@ -18,9 +18,8 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::TensorflowMultisourceModelFilter()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::TensorflowMultisourceModelFilter()
+{
   m_OutputGridSize.Fill(0);
   m_ForceOutputGridSize = false;
   m_FullyConvolutional = false;
@@ -31,38 +30,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   m_OutputSpacingScale = 1.0f;
 
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
- }
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartPad(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartPad(RegionType & region, const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType rval = 0.5 * psz;
     const SizeValueType lval = psz - rval;
     region.GetModifiableIndex()[dim] -= lval;
     region.GetModifiableSize()[dim] += psz;
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartShrink(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartShrink(RegionType &     region,
+                                                                         const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType lval = 0.5 * psz;
     region.GetModifiableIndex()[dim] += lval;
     region.GetModifiableSize()[dim] -= psz - 1;
-    }
- }
+  }
+}
 
 /**
   Compute the input image extent: corners inf and sup.
@@ -70,9 +68,11 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::ImageToExtent(ImageType * image,
+                                                                           PointType & extentInf,
+                                                                           PointType & extentSup,
+                                                                           SizeType &  patchSize)
+{
 
   // Get largest possible region
   RegionType largestPossibleRegion = image->GetLargestPossibleRegion();
@@ -89,13 +89,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType imageEnd;
   image->TransformIndexToPhysicalPoint(imageLastIndex, imageEnd);
   image->TransformIndexToPhysicalPoint(imageFirstIndex, imageOrigin);
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     extentInf[dim] = vnl_math_min(imageOrigin[dim], imageEnd[dim]) - 0.5 * image->GetSpacing()[dim];
     extentSup[dim] = vnl_math_max(imageOrigin[dim], imageEnd[dim]) + 0.5 * image->GetSpacing()[dim];
-    }
-
- }
+  }
+}
 
 /**
   Compute the region of the input image which correspond to the given output requested region
@@ -104,9 +103,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 bool
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::OutputRegionToInputRegion(const RegionType & outputRegion,
+                                                                                       RegionType &       inputRegion,
+                                                                                       ImageType *&       inputImage)
+{
 
   // Mosaic Region Start & End (mosaic image index)
   const IndexType outIndexStart = outputRegion.GetIndex();
@@ -115,45 +115,43 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // Mosaic Region Start & End (geo)
   PointType outPointStart, outPointEnd;
   this->GetOutput()->TransformIndexToPhysicalPoint(outIndexStart, outPointStart);
-  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd  , outPointEnd  );
+  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd, outPointEnd);
 
   // Add the half-width pixel size of the input image
   // and remove the half-width pixel size of the output image
   // (coordinates = pixel center)
   const SpacingType outputSpc = this->GetOutput()->GetSpacing();
   const SpacingType inputSpc = inputImage->GetSpacing();
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
-    const typename SpacingType::ValueType border =
-        0.5 * (inputSpc[dim] - outputSpc[dim]);
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
+    const typename SpacingType::ValueType border = 0.5 * (inputSpc[dim] - outputSpc[dim]);
     if (outPointStart[dim] < outPointEnd[dim])
-      {
+    {
       outPointStart[dim] += border;
-      outPointEnd  [dim] -= border;
-      }
+      outPointEnd[dim] -= border;
+    }
     else
-      {
+    {
       outPointStart[dim] -= border;
-      outPointEnd  [dim] += border;
-      }
+      outPointEnd[dim] += border;
     }
+  }
 
   // Mosaic Region Start & End (input image index)
   IndexType defIndexStart, defIndexEnd;
   inputImage->TransformPhysicalPointToIndex(outPointStart, defIndexStart);
-  inputImage->TransformPhysicalPointToIndex(outPointEnd  , defIndexEnd);
+  inputImage->TransformPhysicalPointToIndex(outPointEnd, defIndexEnd);
 
   // Compute input image region
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     inputRegion.SetIndex(dim, vnl_math_min(defIndexStart[dim], defIndexEnd[dim]));
     inputRegion.SetSize(dim, vnl_math_max(defIndexStart[dim], defIndexEnd[dim]) - inputRegion.GetIndex(dim) + 1);
-    }
+  }
 
   // crop the input requested region at the input's largest possible region
-  return inputRegion.Crop( inputImage->GetLargestPossibleRegion() );
-
- }
+  return inputRegion.Crop(inputImage->GetLargestPossibleRegion());
+}
 
 /*
  * Enlarge the given region to the nearest aligned region.
@@ -161,11 +159,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::EnlargeToAlignedRegion(RegionType& region)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::EnlargeToAlignedRegion(RegionType & region)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = region.GetIndex(dim);
     IndexValueType upper = lower + region.GetSize(dim);
@@ -177,22 +174,20 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateOutputInformation()
+{
 
   Superclass::GenerateOutputInformation();
 
@@ -204,8 +199,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // OTBTF assumes that the output image has the following geometric properties:
   // (1) Image origin is the top-left pixel
   // (2) Image pixel spacing has positive x-spacing and negative y-spacing
-  m_OutputSpacing = this->GetInput(0)->GetSpacing();  // GetSpacing() returns abs. spacing
-  m_OutputSpacing[1] *= -1.0;  // Force negative y-spacing
+  m_OutputSpacing = this->GetInput(0)->GetSpacing(); // GetSpacing() returns abs. spacing
+  m_OutputSpacing[1] *= -1.0;                        // Force negative y-spacing
   m_OutputSpacing[0] *= m_OutputSpacingScale;
   m_OutputSpacing[1] *= m_OutputSpacingScale;
 
@@ -214,30 +209,32 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType extentInf, extentSup;
   extentSup.Fill(itk::NumericTraits<double>::max());
   extentInf.Fill(itk::NumericTraits<double>::NonpositiveMin());
-  for (unsigned int imageIndex = 0 ; imageIndex < this->GetNumberOfInputs() ; imageIndex++)
-    {
-    ImageType * currentImage = static_cast<ImageType *>(
-        Superclass::ProcessObject::GetInput(imageIndex) );
+  for (unsigned int imageIndex = 0; imageIndex < this->GetNumberOfInputs(); imageIndex++)
+  {
+    ImageType * currentImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(imageIndex));
 
     // Update output image extent
     PointType currentInputImageExtentInf, currentInputImageExtentSup;
-    ImageToExtent(currentImage, currentInputImageExtentInf, currentInputImageExtentSup, this->GetInputReceptiveFields()[imageIndex]);
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
+    ImageToExtent(currentImage,
+                  currentInputImageExtentInf,
+                  currentInputImageExtentSup,
+                  this->GetInputReceptiveFields()[imageIndex]);
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
       extentInf[dim] = vnl_math_max(currentInputImageExtentInf[dim], extentInf[dim]);
       extentSup[dim] = vnl_math_min(currentInputImageExtentSup[dim], extentSup[dim]);
-      }
     }
+  }
 
 
   // Set final origin, aligned to the reference image grid.
   // Here we simply get back to the center of the pixel (extents are pixels corners coordinates)
-  m_OutputOrigin[0] =  extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
-  m_OutputOrigin[1] =  extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
+  m_OutputOrigin[0] = extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
+  m_OutputOrigin[1] = extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
 
   // Set final size
-  m_OutputSize[0] = std::floor( (extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]) );
-  m_OutputSize[1] = std::floor( (extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]) );
+  m_OutputSize[0] = std::floor((extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]));
+  m_OutputSize[1] = std::floor((extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]));
 
   // We should take into account one more thing: the expression field. It slightly enlarges the output image extent.
   m_OutputOrigin[0] -= m_OutputSpacing[0] * std::floor(0.5 * this->GetOutputExpressionFields().at(0)[0]);
@@ -247,18 +244,18 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Set output grid size
   if (!m_ForceOutputGridSize)
-    {
+  {
     // Default is the output field of expression
     m_OutputGridSize = this->GetOutputExpressionFields().at(0);
-    }
+  }
 
   // Resize the largestPossibleRegion to be a multiple of the grid size
-  for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+  {
     if (m_OutputGridSize[dim] > m_OutputSize[dim])
       itkGenericExceptionMacro("Output grid size is larger than output image size !");
     m_OutputSize[dim] -= m_OutputSize[dim] % m_OutputGridSize[dim];
-    }
+  }
 
   // Set the largest possible region
   RegionType largestPossibleRegion;
@@ -269,38 +266,39 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   //////////////////////////////////////////////////////////////////////////////////////////
 
   unsigned int outputPixelSize = 0;
-  for (auto& protoShape: this->GetOutputTensorsShapes())
-    {
+  for (auto & protoShape : this->GetOutputTensorsShapes())
+  {
     // Find the number of components
     if (protoShape.dim_size() > 4)
-      {
-      itkExceptionMacro("dim_size=" << protoShape.dim_size() << " currently not supported. "
-          "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
-          "In the case of 1-dimensional tensor, the first dimension is for the batch, "
-          "and we assume that the output tensor has 1 channel. "
-          "In the case of 2-dimensional tensor, the first dimension is for the batch, "
-          "and the second is the number of components. "
-          "In the case of 3-dimensional tensor, the first dimension is for the batch, "
-          "and other dims are for (x, y). "
-          "In the case of 4-dimensional tensor, the first dimension is for the batch, "
-          "and the second and the third are for (x, y). The last is for the number of "
-          "channels. ");
-      }
+    {
+      itkExceptionMacro("dim_size=" << protoShape.dim_size()
+                                    << " currently not supported. "
+                                       "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
+                                       "In the case of 1-dimensional tensor, the first dimension is for the batch, "
+                                       "and we assume that the output tensor has 1 channel. "
+                                       "In the case of 2-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second is the number of components. "
+                                       "In the case of 3-dimensional tensor, the first dimension is for the batch, "
+                                       "and other dims are for (x, y). "
+                                       "In the case of 4-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second and the third are for (x, y). The last is for the number of "
+                                       "channels. ");
+    }
     unsigned int nComponents = tf::GetNumberOfChannelsFromShapeProto(protoShape);
     outputPixelSize += nComponents;
-    }
+  }
 
   // Copy input image projection
-  ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(0) );
+  ImageType *       inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   const std::string projectionRef = inputImage->GetProjectionRef();
 
   // Set output image origin/spacing/size/projection
   ImageType * outputPtr = this->GetOutput();
   outputPtr->SetNumberOfComponentsPerPixel(outputPixelSize);
-  outputPtr->SetProjectionRef        ( projectionRef );
-  outputPtr->SetOrigin               ( m_OutputOrigin );
-  outputPtr->SetSignedSpacing        ( m_OutputSpacing );
-  outputPtr->SetLargestPossibleRegion( largestPossibleRegion );
+  outputPtr->SetProjectionRef(projectionRef);
+  outputPtr->SetOrigin(m_OutputOrigin);
+  outputPtr->SetSignedSpacing(m_OutputSpacing);
+  outputPtr->SetLargestPossibleRegion(largestPossibleRegion);
 
   // Set null pixel
   m_NullPixel.SetSize(outputPtr->GetNumberOfComponentsPerPixel());
@@ -312,14 +310,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintX, m_OutputGridSize[0]);
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintY, m_OutputGridSize[1]);
-
- }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // Output requested region
@@ -329,35 +325,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   EnlargeToAlignedRegion(requestedRegion);
 
   // For each image, get the requested region
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // Compute the requested region
     RegionType inRegion;
-    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage) )
-      {
+    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage))
+    {
       // Image does not overlap requested region: set requested region to null
-      otbLogMacro(Debug,  << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
+      otbLogMacro(Debug, << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
       inRegion.GetModifiableIndex().Fill(0);
       inRegion.GetModifiableSize().Fill(0);
-      }
+    }
 
     // Compute the FOV-scale*FOE radius to pad
     SizeType toPad(this->GetInputReceptiveFields().at(i));
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
-      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale * this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim] ;
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
+      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale *
+                           this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim];
       if (valToPad > toPad[dim])
-        itkExceptionMacro("The input requested region of source #" << i << " is not consistent (dim "<< dim<< ")." <<
-                          "Please check RF, EF, SF vs physical spacing of your image!" <<
-                          "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim] <<
-                          "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim] <<
-                          "\nScale factor: " << m_OutputSpacingScale <<
-                          "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] <<
-                          "\nImage " << i << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
+        itkExceptionMacro("The input requested region of source #"
+                          << i << " is not consistent (dim " << dim << ")."
+                          << " Please check RF, EF, SF vs physical spacing of your image!"
+                          << "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim]
+                          << "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim]
+                          << "\nScale factor: " << m_OutputSpacingScale
+                          << "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] << "\nImage " << i
+                          << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
       toPad[dim] -= valToPad;
-      }
+    }
 
     // Pad with radius
     SmartPad(inRegion, toPad);
@@ -368,30 +366,28 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // can be one pixel larger when the input image regions are not physically
     // aligned.
     if (!m_FullyConvolutional)
-      {
+    {
       inRegion.PadByRadius(1);
-      }
+    }
 
     inRegion.Crop(inputImage->GetLargestPossibleRegion());
 
     // Update the requested region
     inputImage->SetRequestedRegion(inRegion);
 
-    } // next image
-
- }
+  } // next image
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   typename TOutputImage::Pointer outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType               outputReqRegion = outputPtr->GetRequestedRegion();
 
   // Get the aligned output requested region
   RegionType outputAlignedReqRegion(outputReqRegion);
@@ -404,10 +400,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < nInputs ; i++)
-    {
+  for (unsigned int i = 0; i < nInputs; i++)
+  {
     // Input image pointer
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
@@ -416,13 +412,13 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     const RegionType reqRegion = inputPtr->GetRequestedRegion();
 
     if (m_FullyConvolutional)
-      {
+    {
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = 1;
-      tensorflow::int64 sz_y = reqRegion.GetSize(1);
-      tensorflow::int64 sz_x = reqRegion.GetSize(0);
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = 1;
+      tensorflow::int64       sz_y = reqRegion.GetSize(1);
+      tensorflow::int64       sz_x = reqRegion.GetSize(0);
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -433,16 +429,16 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Input is the tensor representing the subset of image
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
-      }
+    }
     else
-      {
+    {
       // Preparing patches
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = outputReqRegion.GetNumberOfPixels();
-      tensorflow::int64 sz_y = inputPatchSize[1];
-      tensorflow::int64 sz_x = inputPatchSize[0];
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = outputReqRegion.GetNumberOfPixels();
+      tensorflow::int64       sz_y = inputPatchSize[1];
+      tensorflow::int64       sz_x = inputPatchSize[0];
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -450,10 +446,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Fill the input tensor.
       // We iterate over points which are located from the index iterator
       // moving through the output image requested region
-      unsigned int elemIndex = 0;
+      unsigned int      elemIndex = 0;
       IndexIteratorType idxIt(outputPtr, outputReqRegion);
       for (idxIt.GoToBegin(); !idxIt.IsAtEnd(); ++idxIt)
-        {
+      {
         // Get the coordinates of the current output pixel
         PointType point;
         outputPtr->TransformIndexToPhysicalPoint(idxIt.GetIndex(), point);
@@ -461,15 +457,15 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
         // Sample the i-th input patch centered on the point
         tf::SampleCenteredPatch<TInputImage>(inputPtr, point, inputPatchSize, inputTensor, elemIndex);
         elemIndex++;
-        }
+      }
 
       // Input is the tensor of patches (aka the batch)
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
 
-      } // mode is not full convolutional
+    } // mode is not full convolutional
 
-    } // next input tensor
+  } // next input tensor
 
   // Run session
   // TODO: see if we print some info about inputs/outputs of the model e.g. m_OutputTensors
@@ -483,26 +479,25 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Get output tensors
   int bandOffset = 0;
-  for (unsigned int i = 0 ; i < outputs.size() ; i++)
-    {
+  for (unsigned int i = 0; i < outputs.size(); i++)
+  {
     // The offset (i.e. the starting index of the channel for the output tensor) is updated
     // during this call
-    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see tf::CopyTensorToImageRegion)
+    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see
+    // tf::CopyTensorToImageRegion)
     try
-      {
-      tf::CopyTensorToImageRegion<TOutputImage> (outputs[i],
-          outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
-      }
-    catch( itk::ExceptionObject & err )
-      {
+    {
+      tf::CopyTensorToImageRegion<TOutputImage>(
+        outputs[i], outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
+    }
+    catch (itk::ExceptionObject & err)
+    {
       std::stringstream debugMsg = this->GenerateDebugReport(inputs);
       itkExceptionMacro("Error occurred during tensor to image conversion.\n"
-          << "Context: " << debugMsg.str()
-          << "Error:" << err);
-      }
+                        << "Context: " << debugMsg.str() << "Error:" << err);
     }
-
- }
+  }
+}
 
 
 } // end namespace otb
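
A note on EnlargeToAlignedRegion() above: it snaps the requested region outward so that both corners land on multiples of m_OutputGridSize, which keeps the region size consistent with the TileHintX/TileHintY metadata set in GenerateOutputInformation(). A minimal standalone sketch of that per-dimension arithmetic, using plain integers instead of ITK region types (the modulo-based deltaLo/deltaUp computation is an assumption, since those lines fall outside this hunk):

#include <cstdio>

// Snap [lower, upper) outward to multiples of gridSize, one dimension at a time,
// mirroring the corner moves in EnlargeToAlignedRegion().
static void AlignToGrid(long & lower, long & upper, long gridSize)
{
  const long deltaLo = lower % gridSize; // offset past the previous grid line (assumed computation)
  const long deltaUp = upper % gridSize; // offset past the previous grid line (assumed computation)
  lower -= deltaLo;                      // lower corner moves down onto the grid
  if (deltaUp > 0)
    upper += gridSize - deltaUp;         // upper corner moves up onto the grid
}

int main()
{
  long lower = 130, upper = 385;
  AlignToGrid(lower, upper, 64);
  // Prints: aligned region: [128, 448) size 320
  std::printf("aligned region: [%ld, %ld) size %ld\n", lower, upper, upper - lower);
  return 0;
}
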
diff --git a/include/otbTensorflowMultisourceModelLearningBase.h b/include/otbTensorflowMultisourceModelLearningBase.h
index 0663f17a3f6367d5f5fe0ebbc76b1ca71d64957d..6e01317db89d235e7ddae6740136d28f6470cc59 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.h
+++ b/include/otbTensorflowMultisourceModelLearningBase.h
@@ -53,37 +53,35 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelLearningBase :
-public TensorflowMultisourceModelBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelLearningBase : public TensorflowMultisourceModelBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelLearningBase       Self;
-  typedef TensorflowMultisourceModelBase<TInputImage>  Superclass;
-  typedef itk::SmartPointer<Self>                      Pointer;
-  typedef itk::SmartPointer<const Self>                ConstPointer;
+  typedef TensorflowMultisourceModelLearningBase      Self;
+  typedef TensorflowMultisourceModelBase<TInputImage> Superclass;
+  typedef itk::SmartPointer<Self>                     Pointer;
+  typedef itk::SmartPointer<const Self>               ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelLearningBase, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::DictElementType   DictElementType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
 
   /* Typedefs for index */
-  typedef typename ImageType::IndexValueType     IndexValueType;
-  typedef std::vector<IndexValueType>            IndexListType;
+  typedef typename ImageType::IndexValueType IndexValueType;
+  typedef std::vector<IndexValueType>        IndexListType;
 
   // Batch size
   itkSetMacro(BatchSize, IndexValueType);
@@ -98,29 +96,36 @@ public:
 
 protected:
   TensorflowMultisourceModelLearningBase();
-  virtual ~TensorflowMultisourceModelLearningBase() {};
+  virtual ~TensorflowMultisourceModelLearningBase(){};
 
-  virtual void GenerateOutputInformation(void) override;
+  virtual void
+  GenerateOutputInformation(void) override;
 
-  virtual void GenerateInputRequestedRegion();
+  virtual void
+  GenerateInputRequestedRegion();
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
-  virtual void PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize, const IndexListType & order);
+  virtual void
+  PopulateInputTensors(DictType &             inputs,
+                       const IndexValueType & sampleStart,
+                       const IndexValueType & batchSize,
+                       const IndexListType &  order);
 
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize) = 0;
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize) = 0;
 
 private:
-  TensorflowMultisourceModelLearningBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelLearningBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  unsigned int          m_BatchSize;       // Batch size
-  bool                  m_UseStreaming;    // Use streaming on/off
+  unsigned int m_BatchSize;    // Batch size
+  bool         m_UseStreaming; // Use streaming on/off
 
   // Read only
-  IndexValueType        m_NumberOfSamples; // Number of samples
+  IndexValueType m_NumberOfSamples; // Number of samples
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelLearningBase.hxx b/include/otbTensorflowMultisourceModelLearningBase.hxx
index 28b2328b8b82c49896ed40a1edd18fba5cebd7a7..bfa26d4dc3789a18879891fc9df9581a03fb4d1a 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.hxx
+++ b/include/otbTensorflowMultisourceModelLearningBase.hxx
@@ -18,39 +18,38 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelLearningBase<TInputImage>
-::TensorflowMultisourceModelLearningBase(): m_BatchSize(100),
-m_UseStreaming(false), m_NumberOfSamples(0)
- {
- }
+TensorflowMultisourceModelLearningBase<TInputImage>::TensorflowMultisourceModelLearningBase()
+  : m_BatchSize(100)
+  , m_UseStreaming(false)
+  , m_NumberOfSamples(0)
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Set an empty output buffered region
   ImageType * outputPtr = this->GetOutput();
-  RegionType nullRegion;
+  RegionType  nullRegion;
   nullRegion.GetModifiableSize().Fill(1);
   outputPtr->SetNumberOfComponentsPerPixel(1);
-  outputPtr->SetLargestPossibleRegion( nullRegion );
+  outputPtr->SetLargestPossibleRegion(nullRegion);
 
   // Count the number of samples
   m_NumberOfSamples = 0;
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Make sure input is available
-    if ( inputPtr.IsNull() )
-      {
+    if (inputPtr.IsNull())
+    {
       itkExceptionMacro(<< "Input " << i << " is null!");
-      }
+    }
 
     // Update input information
     inputPtr->UpdateOutputInformation();
@@ -63,67 +62,62 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
     // Check size X
     if (inputPatchSize[0] != reqRegion.GetSize(0))
-      itkExceptionMacro("Patch size for input " << i
-          << " is " << inputPatchSize
-          << " but input patches image size is " << reqRegion.GetSize());
+      itkExceptionMacro("Patch size for input " << i << " is " << inputPatchSize << " but input patches image size is "
+                                                << reqRegion.GetSize());
 
     // Check size Y
     if (reqRegion.GetSize(1) % inputPatchSize[1] != 0)
       itkExceptionMacro("Input patches image must have a number of rows which is "
-          << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
-          << " rows but patch size Y is " <<  inputPatchSize[1] << " for input " << i);
+                        << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
+                        << " rows but patch size Y is " << inputPatchSize[1] << " for input " << i);
 
     // Get the batch size
     const IndexValueType currNumberOfSamples = reqRegion.GetSize(1) / inputPatchSize[1];
 
     // Check the consistency with other inputs
     if (m_NumberOfSamples == 0)
-      {
+    {
       m_NumberOfSamples = currNumberOfSamples;
-      }
+    }
     else if (m_NumberOfSamples != currNumberOfSamples)
-      {
-      itkGenericExceptionMacro("Batch size of input " << (i-1)
-          << " was " << m_NumberOfSamples
-          << " but input " << i
-          << " has a batch size of " << currNumberOfSamples );
-      }
-    } // next input
- }
+    {
+      itkGenericExceptionMacro("Batch size of input " << (i - 1) << " was " << m_NumberOfSamples << " but input " << i
+                                                      << " has a batch size of " << currNumberOfSamples);
+    }
+  } // next input
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // For each image, set the requested region
   RegionType nullRegion;
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // If the streaming is enabled, we don't read the full image
     if (m_UseStreaming)
-      {
+    {
       inputImage->SetRequestedRegion(nullRegion);
-      }
+    }
     else
-      {
+    {
       inputImage->SetRequestedRegion(inputImage->GetLargestPossibleRegion());
-      }
-    } // next image
- }
+    }
+  } // next image
+}
 
 /**
  *
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateData()
+{
 
   // Batches loop
   const IndexValueType nBatches = std::ceil(m_NumberOfSamples / m_BatchSize);
@@ -131,15 +125,15 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
   itk::ProgressReporter progress(this, 0, nBatches);
 
-  for (IndexValueType batch = 0 ; batch < nBatches ; batch++)
-    {
+  for (IndexValueType batch = 0; batch < nBatches; batch++)
+  {
 
     // Feed dict
     DictType inputs;
 
     // Batch start and size
     const IndexValueType sampleStart = batch * m_BatchSize;
-    IndexValueType batchSize = m_BatchSize;
+    IndexValueType       batchSize = m_BatchSize;
     if (rest != 0 && batch == nBatches - 1)
     {
       batchSize = rest;
@@ -149,40 +143,40 @@ TensorflowMultisourceModelLearningBase<TInputImage>
     this->ProcessBatch(inputs, sampleStart, batchSize);
 
     progress.CompletedPixel();
-    } // Next batch
-
- }
+  } // Next batch
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize, const IndexListType & order)
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::PopulateInputTensors(DictType &             inputs,
+                                                                          const IndexValueType & sampleStart,
+                                                                          const IndexValueType & batchSize,
+                                                                          const IndexListType &  order)
+{
   const bool reorder = order.size();
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
 
     // Create the tensor for the batch
-    const tensorflow::int64 sz_n = batchSize;
-    const tensorflow::int64 sz_y = inputPatchSize[1];
-    const tensorflow::int64 sz_x = inputPatchSize[0];
-    const tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-    const tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
-    tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
+    const tensorflow::int64       sz_n = batchSize;
+    const tensorflow::int64       sz_y = inputPatchSize[1];
+    const tensorflow::int64       sz_x = inputPatchSize[0];
+    const tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+    const tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
+    tensorflow::Tensor            inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
 
     // Populate the tensor
-    for (IndexValueType elem = 0 ; elem < batchSize ; elem++)
-      {
+    for (IndexValueType elem = 0; elem < batchSize; elem++)
+    {
       const tensorflow::uint64 samplePos = sampleStart + elem;
-      IndexType start;
+      IndexType                start;
       start[0] = 0;
       if (reorder)
       {
@@ -190,7 +184,7 @@ TensorflowMultisourceModelLearningBase<TInputImage>
       }
       else
       {
-        start[1] = samplePos * sz_y;;
+        start[1] = samplePos * sz_y;
       }
       RegionType patchRegion(start, inputPatchSize);
       if (m_UseStreaming)
@@ -198,14 +193,14 @@ TensorflowMultisourceModelLearningBase<TInputImage>
         // If streaming is enabled, we need to explicitly propagate requested region
         tf::PropagateRequestedRegion<TInputImage>(inputPtr, patchRegion);
       }
-      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem );
-      }
+      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem);
+    }
 
     // Input #i : the tensor of patches (aka the batch)
     DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
     inputs.push_back(input);
-    } // next input tensor
- }
+  } // next input tensor
+}
 
 
 } // end namespace otb
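
A side note on the batch bookkeeping in GenerateData() above: m_NumberOfSamples and m_BatchSize are both integral, so the division inside std::ceil() truncates before std::ceil() runs; a rounded-up batch count has to come from integer arithmetic such as (N + B - 1) / B. A standalone sketch of the intended loop, assuming rest = m_NumberOfSamples % m_BatchSize (the computation of rest is outside this hunk):

#include <cstdio>

int main()
{
  const long nSamples = 1042; // illustrative sample count
  const long batchSize = 100; // illustrative batch size

  const long nBatches = (nSamples + batchSize - 1) / batchSize; // 11: rounded up without std::ceil
  const long rest = nSamples % batchSize;                       // 42 samples left for the final batch

  for (long batch = 0; batch < nBatches; batch++)
  {
    const long sampleStart = batch * batchSize;
    const long thisBatchSize = (rest != 0 && batch == nBatches - 1) ? rest : batchSize;
    std::printf("batch %ld: samples [%ld, %ld)\n", batch, sampleStart, sampleStart + thisBatchSize);
  }
  return 0;
}
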
diff --git a/include/otbTensorflowMultisourceModelTrain.h b/include/otbTensorflowMultisourceModelTrain.h
index 8ec4c38c369d532a706746c9674197ad766f657b..694f09e0b0ebfdd65305432a602e9f3908c8eadf 100644
--- a/include/otbTensorflowMultisourceModelTrain.h
+++ b/include/otbTensorflowMultisourceModelTrain.h
@@ -34,11 +34,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelTrain :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelTrain : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelTrain                     Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -52,25 +50,27 @@ public:
   itkTypeMacro(TensorflowMultisourceModelTrain, TensorflowMultisourceModelLearningBase);
 
   /** Superclass typedefs */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
 
 protected:
   TensorflowMultisourceModelTrain();
-  virtual ~TensorflowMultisourceModelTrain() {};
+  virtual ~TensorflowMultisourceModelTrain(){};
 
-  virtual void GenerateData();
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  virtual void
+  GenerateData();
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelTrain(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelTrain(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  IndexListType     m_RandomIndices;           // Reordered indices
+  IndexListType m_RandomIndices; // Reordered indices
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelTrain.hxx b/include/otbTensorflowMultisourceModelTrain.hxx
index 272dd6390668bd233c5ec41b99ff2b088ef313c3..46bc2d7bd22cab4a90a40131436bd428dc77aff9 100644
--- a/include/otbTensorflowMultisourceModelTrain.hxx
+++ b/include/otbTensorflowMultisourceModelTrain.hxx
@@ -18,37 +18,33 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelTrain<TInputImage>
-::TensorflowMultisourceModelTrain()
- {
- }
+TensorflowMultisourceModelTrain<TInputImage>::TensorflowMultisourceModelTrain()
+{}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelTrain<TInputImage>::GenerateData()
+{
 
   // Initial sequence 1...N
   m_RandomIndices.resize(this->GetNumberOfSamples());
-  std::iota (std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
+  std::iota(std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
 
   // Shuffle the sequence
   std::random_device rd;
-  std::mt19937 g(rd());
+  std::mt19937       g(rd());
   std::shuffle(m_RandomIndices.begin(), m_RandomIndices.end(), g);
 
   // Call the generic method
   Superclass::GenerateData();
-
- }
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelTrain<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                           const IndexValueType & sampleStart,
+                                                           const IndexValueType & batchSize)
+{
   // Populate input tensors
   this->PopulateInputTensors(inputs, sampleStart, batchSize, m_RandomIndices);
 
@@ -57,12 +53,11 @@ TensorflowMultisourceModelTrain<TInputImage>
   this->RunSession(inputs, outputs);
 
   // Display outputs tensors
-  for (auto& o: outputs)
+  for (auto & o : outputs)
   {
     tf::PrintTensorInfos(o);
   }
-
- }
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelValidate.h b/include/otbTensorflowMultisourceModelValidate.h
index 322f6a24e288db9d9acf202e72ffe04ff8d8a8d4..54691747a7128d625c4a637972da67d02f11e1e1 100644
--- a/include/otbTensorflowMultisourceModelValidate.h
+++ b/include/otbTensorflowMultisourceModelValidate.h
@@ -42,11 +42,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelValidate :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelValidate : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelValidate                  Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -60,20 +58,20 @@ public:
   itkTypeMacro(TensorflowMultisourceModelValidate, TensorflowMultisourceModelLearningBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
-  typedef std::vector<ImagePointerType>          ImageListType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef std::vector<ImagePointerType>         ImageListType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::StringList     StringList;
+  typedef typename Superclass::SizeListType   SizeListType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
   /* Typedefs for validation */
   typedef unsigned long                            CountValueType;
@@ -87,36 +85,43 @@ public:
   typedef itk::ImageRegionConstIterator<ImageType> IteratorType;
 
   /** Set and Get the input references */
-  virtual void SetInputReferences(ImageListType input);
-  ImagePointerType GetInputReference(unsigned int index);
+  virtual void
+  SetInputReferences(ImageListType input);
+  ImagePointerType
+  GetInputReference(unsigned int index);
 
   /** Get the confusion matrix */
-  const ConfMatType GetConfusionMatrix(unsigned int target);
+  const ConfMatType
+  GetConfusionMatrix(unsigned int target);
 
   /** Get the map of classes matrix */
-  const MapOfClassesType GetMapOfClasses(unsigned int target);
+  const MapOfClassesType
+  GetMapOfClasses(unsigned int target);
 
 protected:
   TensorflowMultisourceModelValidate();
-  virtual ~TensorflowMultisourceModelValidate() {};
+  virtual ~TensorflowMultisourceModelValidate(){};
 
-  void GenerateOutputInformation(void);
-  void GenerateData();
-  void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  void
+  GenerateOutputInformation(void);
+  void
+  GenerateData();
+  void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelValidate(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelValidate(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  ImageListType              m_References;              // The references images
+  ImageListType m_References; // The references images
 
   // Read only
-  ConfMatListType            m_ConfusionMatrices;       // Confusion matrix
-  MapOfClassesListType       m_MapsOfClasses;           // Maps of classes
+  ConfMatListType      m_ConfusionMatrices; // Confusion matrix
+  MapOfClassesListType m_MapsOfClasses;     // Maps of classes
 
   // Internal
-  std::vector<MatMapType>    m_ConfMatMaps;             // Accumulators
+  std::vector<MatMapType> m_ConfMatMaps; // Accumulators
 
 }; // end class
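
The header above exposes the validation results through GetConfusionMatrix() and GetMapOfClasses(). As an illustration of what a caller can derive from them, here is a hedged, standalone sketch computing overall accuracy from a dense confusion matrix (plain std::vector in place of the matrix type used by the filter; the helper name is hypothetical):

#include <cstddef>
#include <cstdio>
#include <vector>

// Overall accuracy = trace / total, for a dense confusion matrix
// (rows: reference labels, columns: predicted labels).
static double OverallAccuracy(const std::vector<std::vector<unsigned long>> & mat)
{
  unsigned long diag = 0, total = 0;
  for (std::size_t r = 0; r < mat.size(); r++)
    for (std::size_t c = 0; c < mat[r].size(); c++)
    {
      total += mat[r][c];
      if (r == c)
        diag += mat[r][c];
    }
  return total ? static_cast<double>(diag) / static_cast<double>(total) : 0.0;
}

int main()
{
  const std::vector<std::vector<unsigned long>> mat = { { 50, 2 }, { 5, 43 } };
  std::printf("overall accuracy: %.3f\n", OverallAccuracy(mat)); // prints 0.930
  return 0;
}
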
 
diff --git a/include/otbTensorflowMultisourceModelValidate.hxx b/include/otbTensorflowMultisourceModelValidate.hxx
index 8ec685ba81c1ae51111a077e8170dd227be7241e..a929aa884ea97ff3f80af968b275dc1029fb0de4 100644
--- a/include/otbTensorflowMultisourceModelValidate.hxx
+++ b/include/otbTensorflowMultisourceModelValidate.hxx
@@ -18,82 +18,77 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelValidate<TInputImage>
-::TensorflowMultisourceModelValidate()
- {
- }
+TensorflowMultisourceModelValidate<TInputImage>::TensorflowMultisourceModelValidate()
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Check that there is some reference
   const unsigned int nbOfRefs = m_References.size();
   if (nbOfRefs == 0)
-    {
+  {
     itkExceptionMacro("No reference is set");
-    }
+  }
 
   // Check the number of references
   SizeListType outputPatchSizes = this->GetOutputExpressionFields();
   if (nbOfRefs != outputPatchSizes.size())
-    {
-    itkExceptionMacro("There is " << nbOfRefs << " references but only " <<
-                      outputPatchSizes.size() << " output patch sizes");
-    }
+  {
+    itkExceptionMacro("There is " << nbOfRefs << " references but only " << outputPatchSizes.size()
+                                  << " output patch sizes");
+  }
 
   // Check reference image infos
-  for (unsigned int i = 0 ; i < nbOfRefs ; i++)
-    {
-    const SizeType outputPatchSize = outputPatchSizes[i];
+  for (unsigned int i = 0; i < nbOfRefs; i++)
+  {
+    const SizeType   outputPatchSize = outputPatchSizes[i];
     const RegionType refRegion = m_References[i]->GetLargestPossibleRegion();
     if (refRegion.GetSize(0) != outputPatchSize[0])
-      {
-      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) <<
-                        " but patch size (x) is " << outputPatchSize[0]);
-      }
+    {
+      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) << " but patch size (x) is "
+                                           << outputPatchSize[0]);
+    }
     if (refRegion.GetSize(1) != this->GetNumberOfSamples() * outputPatchSize[1])
-      {
-      itkExceptionMacro("Reference image " << i << " height is " << refRegion.GetSize(1) <<
-                        " but patch size (y) is " << outputPatchSize[1] <<
-                        " which is not consistent with the number of samples (" << this->GetNumberOfSamples() << ")");
-      }
+    {
+      itkExceptionMacro("Reference image "
+                        << i << " height is " << refRegion.GetSize(1) << " but patch size (y) is " << outputPatchSize[1]
+                        << " which is not consistent with the number of samples (" << this->GetNumberOfSamples()
+                        << ")");
     }
-
- }
+  }
+}
 
 
 /*
  * Set the references images
  */
-template<class TInputImage>
+template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::SetInputReferences(ImageListType input)
- {
+TensorflowMultisourceModelValidate<TInputImage>::SetInputReferences(ImageListType input)
+{
   m_References = input;
- }
+}
 
 /*
  * Retrieve the i-th reference image
  * An exception is thrown if it doesn't exist.
  */
-template<class TInputImage>
+template <class TInputImage>
 typename TensorflowMultisourceModelValidate<TInputImage>::ImagePointerType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetInputReference(unsigned int index)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetInputReference(unsigned int index)
+{
   if (m_References.size() <= index || !m_References[index])
-    {
+  {
     itkExceptionMacro("There is no input reference #" << index);
-    }
+  }
 
   return m_References[index];
- }
+}
 
 /**
  * Perform the validation
@@ -103,73 +98,70 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateData()
+{
 
   // Temporary images for outputs
   m_ConfusionMatrices.clear();
   m_MapsOfClasses.clear();
   m_ConfMatMaps.clear();
-  for (auto const& ref: m_References)
-    {
-    (void) ref;
+  for (auto const & ref : m_References)
+  {
+    (void)ref;
 
     // New confusion matrix
     MatMapType mat;
     m_ConfMatMaps.push_back(mat);
-    }
+  }
 
   // Run all the batches
   Superclass::GenerateData();
 
   // Compute confusion matrices
-  for (unsigned int i = 0 ; i < m_ConfMatMaps.size() ; i++)
-    {
+  for (unsigned int i = 0; i < m_ConfMatMaps.size(); i++)
+  {
     // Confusion matrix (map) for current target
     MatMapType mat = m_ConfMatMaps[i];
 
     // List all values
     MapOfClassesType values;
-    LabelValueType curVal = 0;
-    for (auto const& ref: mat)
-      {
+    LabelValueType   curVal = 0;
+    for (auto const & ref : mat)
+    {
       if (values.count(ref.first) == 0)
-        {
+      {
         values[ref.first] = curVal;
         curVal++;
-        }
-      for (auto const& in: ref.second)
+      }
+      for (auto const & in : ref.second)
         if (values.count(in.first) == 0)
-          {
+        {
           values[in.first] = curVal;
           curVal++;
-          }
-      }
+        }
+    }
 
     // Build the confusion matrix
     const LabelValueType nValues = values.size();
-    ConfMatType matrix(nValues, nValues);
+    ConfMatType          matrix(nValues, nValues);
     matrix.Fill(0);
-    for (auto const& ref: mat)
-      for (auto const& in: ref.second)
+    for (auto const & ref : mat)
+      for (auto const & in : ref.second)
         matrix[values[ref.first]][values[in.first]] = in.second;
 
     // Add the confusion matrix
     m_ConfusionMatrices.push_back(matrix);
     m_MapsOfClasses.push_back(values);
-
-    }
-
- }
+  }
+}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelValidate<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                              const IndexValueType & sampleStart,
+                                                              const IndexValueType & batchSize)
+{
   // Populate input tensors
   IndexListType empty;
   this->PopulateInputTensors(inputs, sampleStart, batchSize, empty);
@@ -180,16 +172,16 @@ TensorflowMultisourceModelValidate<TInputImage>
 
   // Perform the validation
   if (outputs.size() != m_References.size())
-    {
-    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, " <<
-                    "but only " << m_References.size() << " reference(s) set");
-    }
+  {
+    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, "
+                                << "but only " << m_References.size() << " reference(s) set");
+  }
   SizeListType outputEFSizes = this->GetOutputExpressionFields();
-  for (unsigned int refIdx = 0 ; refIdx < outputs.size() ; refIdx++)
-    {
+  for (unsigned int refIdx = 0; refIdx < outputs.size(); refIdx++)
+  {
     // Recopy the chunk
     const SizeType outputFOESize = outputEFSizes[refIdx];
-    IndexType cpyStart;
+    IndexType      cpyStart;
     cpyStart.Fill(0);
     IndexType refRegStart;
     refRegStart.Fill(0);
@@ -216,31 +208,30 @@ TensorflowMultisourceModelValidate<TInputImage>
     IteratorType inIt(img, cpyRegion);
     IteratorType refIt(m_References[refIdx], refRegion);
     for (inIt.GoToBegin(), refIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++refIt)
-      {
+    {
       const int classIn = static_cast<LabelValueType>(inIt.Get()[0]);
       const int classRef = static_cast<LabelValueType>(refIt.Get()[0]);
 
       if (m_ConfMatMaps[refIdx].count(classRef) == 0)
-        {
+      {
         MapType newMap;
         newMap[classIn] = 1;
         m_ConfMatMaps[refIdx][classRef] = newMap;
-        }
+      }
       else
-        {
+      {
         if (m_ConfMatMaps[refIdx][classRef].count(classIn) == 0)
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn] = 1;
-          }
+        }
         else
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn]++;
-          }
         }
       }
     }
-
- }
+  }
+}
 
 /*
  * Get the confusion matrix
@@ -248,17 +239,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::ConfMatType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetConfusionMatrix(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetConfusionMatrix(unsigned int target)
+{
   if (target >= m_ConfusionMatrices.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_ConfusionMatrices.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get confusion matrix #" << target << ". "
+                                                         << "There is only " << m_ConfusionMatrices.size()
+                                                         << " available.");
+  }
 
   return m_ConfusionMatrices[target];
- }
+}
 
 /*
  * Get the map of classes
@@ -266,17 +257,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::MapOfClassesType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetMapOfClasses(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetMapOfClasses(unsigned int target)
+{
   if (target >= m_MapsOfClasses.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_MapsOfClasses.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get map of classes #" << target << ". "
+                                                       << "There are only " << m_MapsOfClasses.size()
+                                                       << " available.");
+  }
 
   return m_MapsOfClasses[target];
- }
+}
 
 } // end namespace otb
 
diff --git a/include/otbTensorflowSampler.h b/include/otbTensorflowSampler.h
index bd363bc8ee191ce7506ca012b68171f6d8bdc828..4fae38e75245ca417c638105379ec7ff7dddf6dd 100644
--- a/include/otbTensorflowSampler.h
+++ b/include/otbTensorflowSampler.h
@@ -52,16 +52,14 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TVectorData>
-class ITK_EXPORT TensorflowSampler :
-public itk::ProcessObject
+class ITK_EXPORT TensorflowSampler : public itk::ProcessObject
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowSampler                       Self;
-  typedef itk::ProcessObject                      Superclass;
-  typedef itk::SmartPointer<Self>                 Pointer;
-  typedef itk::SmartPointer<const Self>           ConstPointer;
+  typedef TensorflowSampler             Self;
+  typedef itk::ProcessObject            Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Method for creation through the object factory. */
   itkNewMacro(Self);
@@ -70,33 +68,28 @@ public:
   itkTypeMacro(TensorflowSampler, itk::ProcessObject);
 
   /** Images typedefs */
-  typedef TInputImage                             ImageType;
-  typedef typename TInputImage::Pointer           ImagePointerType;
-  typedef typename TInputImage::InternalPixelType InternalPixelType;
-  typedef typename TInputImage::PixelType         PixelType;
-  typedef typename TInputImage::RegionType        RegionType;
-  typedef typename TInputImage::PointType         PointType;
-  typedef typename TInputImage::SizeType          SizeType;
-  typedef typename TInputImage::IndexType         IndexType;
-  typedef typename otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                          ExtractROIMultiFilterType;
-  typedef typename ExtractROIMultiFilterType::Pointer
-                                                  ExtractROIMultiFilterPointerType;
-  typedef typename std::vector<ImagePointerType>  ImagePointerListType;
-  typedef typename std::vector<SizeType>          SizeListType;
-  typedef typename itk::ImageRegionConstIterator<ImageType>
-                                                  IteratorType;
+  typedef TInputImage                                                                ImageType;
+  typedef typename TInputImage::Pointer                                              ImagePointerType;
+  typedef typename TInputImage::InternalPixelType                                    InternalPixelType;
+  typedef typename TInputImage::PixelType                                            PixelType;
+  typedef typename TInputImage::RegionType                                           RegionType;
+  typedef typename TInputImage::PointType                                            PointType;
+  typedef typename TInputImage::SizeType                                             SizeType;
+  typedef typename TInputImage::IndexType                                            IndexType;
+  typedef typename otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType> ExtractROIMultiFilterType;
+  typedef typename ExtractROIMultiFilterType::Pointer                                ExtractROIMultiFilterPointerType;
+  typedef typename std::vector<ImagePointerType>                                     ImagePointerListType;
+  typedef typename std::vector<SizeType>                                             SizeListType;
+  typedef typename itk::ImageRegionConstIterator<ImageType>                          IteratorType;
 
   /** Vector data typedefs */
-  typedef TVectorData                             VectorDataType;
-  typedef typename VectorDataType::Pointer        VectorDataPointer;
-  typedef typename VectorDataType::DataTreeType   DataTreeType;
-  typedef typename itk::PreOrderTreeIterator<DataTreeType>
-                                                  TreeIteratorType;
-  typedef typename VectorDataType::DataNodeType   DataNodeType;
-  typedef typename DataNodeType::Pointer          DataNodePointer;
-  typedef typename DataNodeType::PolygonListPointerType
-                                                  PolygonListPointerType;
+  typedef TVectorData                                      VectorDataType;
+  typedef typename VectorDataType::Pointer                 VectorDataPointer;
+  typedef typename VectorDataType::DataTreeType            DataTreeType;
+  typedef typename itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
+  typedef typename VectorDataType::DataNodeType            DataNodeType;
+  typedef typename DataNodeType::Pointer                   DataNodePointer;
+  typedef typename DataNodeType::PolygonListPointerType    PolygonListPointerType;
 
   /** Set / get parameters */
   itkSetMacro(Field, std::string);
@@ -107,15 +100,18 @@ public:
   itkGetConstMacro(InputVectorData, VectorDataPointer);
 
   /** Set / get image */
-  virtual void PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval);
-  const ImageType* GetInput(unsigned int index);
+  virtual void
+  PushBackInputWithPatchSize(const ImageType * input, SizeType & patchSize, InternalPixelType nodataval);
+  const ImageType *
+  GetInput(unsigned int index);
 
   /** Set / get no-data related parameters */
   itkSetMacro(RejectPatchesWithNodata, bool);
   itkGetMacro(RejectPatchesWithNodata, bool);
 
   /** Do the real work */
-  virtual void Update();
+  virtual void
+  Update();
 
   /** Get outputs */
   itkGetMacro(OutputPatchImages, ImagePointerListType);
@@ -125,18 +121,21 @@ public:
 
 protected:
   TensorflowSampler();
-  virtual ~TensorflowSampler() {};
+  virtual ~TensorflowSampler(){};
 
-  virtual void ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
-  virtual void AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
+  virtual void
+  ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
+  virtual void
+  AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
 
 private:
-  TensorflowSampler(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowSampler(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  std::string          m_Field;
-  SizeListType         m_PatchSizes;
-  VectorDataPointer    m_InputVectorData;
+  std::string       m_Field;
+  SizeListType      m_PatchSizes;
+  VectorDataPointer m_InputVectorData;
 
   // Read only
   ImagePointerListType m_OutputPatchImages;
@@ -146,7 +145,7 @@ private:
 
   // No data stuff
   std::vector<InternalPixelType> m_NoDataValues;
-  bool                 m_RejectPatchesWithNodata;
+  bool                           m_RejectPatchesWithNodata;
 
 }; // end class
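A hedged usage sketch of the sampler declared above; SetInputVectorData is assumed to be the itkSetMacro counterpart of the getter shown here, and image / vectorData stand for already-loaded OTB objects:

using SamplerType = otb::TensorflowSampler<FloatVectorImageType, VectorDataType>;
SamplerType::Pointer sampler = SamplerType::New();
sampler->SetInputVectorData(vectorData);   // geometries carrying the class field
sampler->SetField("class");                // field read with GetFieldAsInt()
SamplerType::SizeType patchSize;
patchSize.Fill(16);
sampler->PushBackInputWithPatchSize(image, patchSize, 0 /* no-data value */);
sampler->SetRejectPatchesWithNodata(true);
sampler->Update();
SamplerType::ImagePointerListType patches = sampler->GetOutputPatchImages();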
 
diff --git a/include/otbTensorflowSampler.hxx b/include/otbTensorflowSampler.hxx
index 8c0ea7459ad5e1ac0060438e3c6a73b760fc535a..77558c7ba08c6dc75ce8ced1d389a537150a68f7 100644
--- a/include/otbTensorflowSampler.hxx
+++ b/include/otbTensorflowSampler.hxx
@@ -18,36 +18,35 @@ namespace otb
 {
 
 template <class TInputImage, class TVectorData>
-TensorflowSampler<TInputImage, TVectorData>
-::TensorflowSampler()
- {
+TensorflowSampler<TInputImage, TVectorData>::TensorflowSampler()
+{
   m_NumberOfAcceptedSamples = 0;
   m_NumberOfRejectedSamples = 0;
   m_RejectPatchesWithNodata = false;
- }
+}
 
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval)
- {
-  this->ProcessObject::PushBackInput(const_cast<ImageType*>(input));
+TensorflowSampler<TInputImage, TVectorData>::PushBackInputWithPatchSize(const ImageType * input,
+                                                                        SizeType &        patchSize,
+                                                                        InternalPixelType nodataval)
+{
+  this->ProcessObject::PushBackInput(const_cast<ImageType *>(input));
   m_PatchSizes.push_back(patchSize);
   m_NoDataValues.push_back(nodataval);
- }
+}
 
 template <class TInputImage, class TVectorData>
-const TInputImage*
-TensorflowSampler<TInputImage, TVectorData>
-::GetInput(unsigned int index)
- {
+const TInputImage *
+TensorflowSampler<TInputImage, TVectorData>::GetInput(unsigned int index)
+{
   if (this->GetNumberOfInputs() < 1)
   {
     itkExceptionMacro("Input not set");
   }
 
-  return static_cast<const ImageType*>(this->ProcessObject::GetInput(index));
- }
+  return static_cast<const ImageType *>(this->ProcessObject::GetInput(index));
+}
 
 
 /**
@@ -55,9 +54,10 @@ TensorflowSampler<TInputImage, TVectorData>
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples)
- {
+TensorflowSampler<TInputImage, TVectorData>::ResizeImage(ImagePointerType & image,
+                                                         SizeType &         patchSize,
+                                                         unsigned int       nbSamples)
+{
   // New image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -71,16 +71,18 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Assign
   image = resizer->GetOutput();
- }
+}
 
 /**
  * Allocate an image given a patch size and a number of samples
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents)
- {
+TensorflowSampler<TInputImage, TVectorData>::AllocateImage(ImagePointerType & image,
+                                                           SizeType &         patchSize,
+                                                           unsigned int       nbSamples,
+                                                           unsigned int       nbComponents)
+{
   // Image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -91,16 +93,15 @@ TensorflowSampler<TInputImage, TVectorData>
   image->SetNumberOfComponentsPerPixel(nbComponents);
   image->SetRegions(region);
   image->Allocate();
- }
+}
 
 /**
  * Do the work
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::Update()
- {
+TensorflowSampler<TInputImage, TVectorData>::Update()
+{
 
   // Check number of inputs
   if (this->GetNumberOfInputs() != m_PatchSizes.size())
@@ -109,8 +110,8 @@ TensorflowSampler<TInputImage, TVectorData>
   }
 
   // Count points
-  unsigned int nTotal = 0;
-  unsigned int geomId = 0;
+  unsigned int     nTotal = 0;
+  unsigned int     geomId = 0;
   TreeIteratorType itVector(m_InputVectorData->GetDataTree());
   itVector.GoToBegin();
   while (!itVector.IsAtEnd())
@@ -146,7 +147,7 @@ TensorflowSampler<TInputImage, TVectorData>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   m_OutputPatchImages.clear();
   m_OutputPatchImages.reserve(nbInputs);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ImagePointerType newImage;
     AllocateImage(newImage, m_PatchSizes[i], nTotal, GetInput(i)->GetNumberOfComponentsPerPixel());
@@ -160,7 +161,7 @@ TensorflowSampler<TInputImage, TVectorData>
   itVector.GoToBegin();
   unsigned long count = 0;
   unsigned long rejected = 0;
-  IndexType labelIndex;
+  IndexType     labelIndex;
   labelIndex[0] = 0;
   PixelType labelPix;
   labelPix.SetSize(1);
@@ -169,13 +170,13 @@ TensorflowSampler<TInputImage, TVectorData>
     if (!itVector.Get()->IsRoot() && !itVector.Get()->IsDocument() && !itVector.Get()->IsFolder())
     {
       DataNodePointer currentGeometry = itVector.Get();
-      PointType point = currentGeometry->GetPoint();
+      PointType       point = currentGeometry->GetPoint();
 
       // Get the label value
       labelPix[0] = static_cast<InternalPixelType>(currentGeometry->GetFieldAsInt(m_Field));
 
       bool hasBeenSampled = true;
-      for (unsigned int i = 0 ; i < nbInputs ; i++)
+      for (unsigned int i = 0; i < nbInputs; i++)
       {
         // Get input
         ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
@@ -188,7 +189,7 @@ TensorflowSampler<TInputImage, TVectorData>
         }
         // Check if the sampled patch contains a no-data value
         if (m_RejectPatchesWithNodata && hasBeenSampled)
-          {
+        {
           IndexType outIndex;
           outIndex[0] = 0;
           outIndex[1] = count * m_PatchSizes[i][1];
@@ -196,13 +197,13 @@ TensorflowSampler<TInputImage, TVectorData>
 
           IteratorType it(m_OutputPatchImages[i], region);
           for (it.GoToBegin(); !it.IsAtEnd(); ++it)
-            {
+          {
             PixelType pix = it.Get();
-            for (unsigned int band = 0 ; band < pix.Size() ; band++)
+            for (unsigned int band = 0; band < pix.Size(); band++)
               if (pix[band] == m_NoDataValues[i])
                 hasBeenSampled = false;
-            }
           }
+        }
       } // Next input
       if (hasBeenSampled)
       {
@@ -220,7 +221,6 @@ TensorflowSampler<TInputImage, TVectorData>
 
       // Update progress
       progress.CompletedPixel();
-
     }
 
     ++itVector;
@@ -228,7 +228,7 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Resize output images
   ResizeImage(m_OutputLabelImage, labelPatchSize, count);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ResizeImage(m_OutputPatchImages[i], m_PatchSizes[i], count);
   }
@@ -236,8 +236,7 @@ TensorflowSampler<TInputImage, TVectorData>
   // Update number of samples produced
   m_NumberOfAcceptedSamples = count;
   m_NumberOfRejectedSamples = rejected;
-
- }
+}
 
 } // end namespace otb
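The Update() loop above writes the i-th accepted sample at row offset count * patchSize[1], i.e. patches are stacked along the Y axis of each output image. A small hypothetical helper reproducing that layout, e.g. to read one patch back:

template <class TImage>
typename TImage::RegionType
PatchRegion(unsigned int sampleIndex, const typename TImage::SizeType & patchSize)
{
  typename TImage::IndexType index;
  index[0] = 0;                          // patches span the full patch width
  index[1] = sampleIndex * patchSize[1]; // and are concatenated along rows
  return typename TImage::RegionType(index, patchSize);
}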
 
diff --git a/include/otbTensorflowSamplingUtils.cxx b/include/otbTensorflowSamplingUtils.cxx
index 5cf88f6b171b61c9576b4ca68d855a0d6059d42f..db4d9ea01d718c5957a9080486dcb18b83097995 100644
--- a/include/otbTensorflowSamplingUtils.cxx
+++ b/include/otbTensorflowSamplingUtils.cxx
@@ -19,13 +19,15 @@ namespace tf
 //
 // Update the distribution of the patch located at the specified location
 //
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist)
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist)
 {
   typename TImage::IndexType index;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     index[0] -= patchSize[0] / 2;
@@ -38,7 +40,7 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
       // Fill patch
       PropagateRequestedRegion<TImage>(inPtr, inPatchRegion);
 
-      typename itk::ImageRegionConstIterator<TImage> inIt (inPtr, inPatchRegion);
+      typename itk::ImageRegionConstIterator<TImage> inIt(inPtr, inPatchRegion);
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         dist.Update(inIt.Get());
@@ -47,7 +49,6 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
     }
   }
   return false;
-
 }
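A hedged call-site sketch for the function above (LabelImageType, nbClasses, labelImage, point and patchSize are placeholders; the Distribution class itself is reformatted in the header below):

otb::tf::Distribution<LabelImageType> dist(nbClasses);
const bool sampled =
  otb::tf::UpdateDistributionFromPatch<LabelImageType, otb::tf::Distribution<LabelImageType>>(
    labelImage, point, patchSize, dist);
if (sampled)
  std::cout << dist.ToString() << std::endl;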
 
 
diff --git a/include/otbTensorflowSamplingUtils.h b/include/otbTensorflowSamplingUtils.h
index 585f90132ea71509fe2a08ff8f56aa1eac2abb3f..846b71318e57e2f64b7b5d582c4a85223bd809b3 100644
--- a/include/otbTensorflowSamplingUtils.h
+++ b/include/otbTensorflowSamplingUtils.h
@@ -20,77 +20,89 @@ namespace otb
 namespace tf
 {
 
-template<class TImage>
+template <class TImage>
 class Distribution
 {
 public:
   typedef typename TImage::PixelType ValueType;
-  typedef vnl_vector<float> CountsType;
-
-  explicit Distribution(unsigned int nClasses): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, 0))
-  {
-  }
-  Distribution(unsigned int nClasses, float fillValue): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, fillValue))
-  {
-  }
-  Distribution(): m_NbOfClasses(2), m_Dist(CountsType(m_NbOfClasses, 0))
-  {
-  }
-  Distribution(const Distribution & other): m_Dist(other.Get()), m_NbOfClasses(m_Dist.size())
-  {
-  }
-  ~Distribution(){}
-
-  void Update(const typename TImage::PixelType & pixel)
+  typedef vnl_vector<float>          CountsType;
+
+  explicit Distribution(unsigned int nClasses)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, 0))
+  {}
+  Distribution(unsigned int nClasses, float fillValue)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, fillValue))
+  {}
+  Distribution()
+    : m_NbOfClasses(2)
+    , m_Dist(CountsType(m_NbOfClasses, 0))
+  {}
+  Distribution(const Distribution & other)
+    : m_Dist(other.Get())
+    , m_NbOfClasses(m_Dist.size())
+  {}
+  ~Distribution() {}
+
+  void
+  Update(const typename TImage::PixelType & pixel)
   {
     m_Dist[pixel]++;
   }
 
-  void Update(const Distribution & other)
+  void
+  Update(const Distribution & other)
   {
     const CountsType otherDist = other.Get();
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       m_Dist[c] += otherDist[c];
   }
 
-  CountsType Get() const
+  CountsType
+  Get() const
   {
     return m_Dist;
   }
 
-  CountsType GetNormalized() const
+  CountsType
+  GetNormalized() const
   {
-    const float invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
+    const float      invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
     const CountsType normalizedDist = invNorm * m_Dist;
     return normalizedDist;
   }
 
-  float Cosinus(const Distribution & other) const
+  float
+  Cosinus(const Distribution & other) const
   {
     return dot_product(other.GetNormalized(), GetNormalized());
   }
 
-  std::string ToString()
+  std::string
+  ToString()
   {
     std::stringstream ss;
     ss << "\n";
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       ss << "\tClass #" << c << " : " << m_Dist[c] << "\n";
     return ss.str();
   }
 
 private:
   unsigned int m_NbOfClasses;
-  CountsType m_Dist;
+  CountsType   m_Dist;
 };
 
 // Update the distribution of the patch located at the specified location
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist);
-
-} // namesapce tf
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist);
+
+} // namespace tf
 } // namespace otb
 
 #include "otbTensorflowSamplingUtils.cxx"
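A short sketch of the Distribution API reformatted above, assuming a scalar integer pixel type so that Update(pixel) can index the per-class counter directly:

otb::tf::Distribution<LabelImageType> a(3), b(3);
a.Update(0); a.Update(0); a.Update(2);  // counts: [2, 0, 1]
b.Update(0); b.Update(2);               // counts: [1, 0, 1]
const float similarity = a.Cosinus(b);  // cosine of the two normalized count vectors
std::cout << a.ToString();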
diff --git a/include/otbTensorflowSource.h b/include/otbTensorflowSource.h
index 1556997f9a20c02c1f5f9fd80f92c0fc38270657..9bbeed12fbe07820c1a59d125d0af5343dfd3492 100644
--- a/include/otbTensorflowSource.h
+++ b/include/otbTensorflowSource.h
@@ -29,45 +29,43 @@ namespace otb
  * Images must have the same size.
  * This is the common input type used in every OTB-TF applications.
  */
-template<class TImage>
+template <class TImage>
 class TensorflowSource
 {
 public:
   /** Typedefs for images */
-  typedef TImage                                            FloatVectorImageType;
-  typedef typename FloatVectorImageType::Pointer            FloatVectorImagePointerType;
-  typedef typename FloatVectorImageType::InternalPixelType  InternalPixelType;
-  typedef otb::Image<InternalPixelType>                     FloatImageType;
-  typedef typename FloatImageType::SizeType                 SizeType;
+  typedef TImage                                           FloatVectorImageType;
+  typedef typename FloatVectorImageType::Pointer           FloatVectorImagePointerType;
+  typedef typename FloatVectorImageType::InternalPixelType InternalPixelType;
+  typedef otb::Image<InternalPixelType>                    FloatImageType;
+  typedef typename FloatImageType::SizeType                SizeType;
 
   /** Typedefs for image concatenation */
-  typedef otb::ImageList<FloatImageType>                    ImageListType;
-  typedef typename ImageListType::Pointer                   ImageListPointer;
-  typedef ImageListToVectorImageFilter<ImageListType,
-      FloatVectorImageType>                                 ListConcatenerFilterType;
-  typedef typename ListConcatenerFilterType::Pointer        ListConcatenerFilterPointer;
-  typedef MultiToMonoChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    MultiToMonoChannelFilterType;
-  typedef ObjectList<MultiToMonoChannelFilterType>          ExtractROIFilterListType;
-  typedef typename ExtractROIFilterListType::Pointer        ExtractROIFilterListPointer;
-  typedef otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    ExtractFilterType;
-  typedef otb::ObjectList<FloatVectorImageType>             FloatVectorImageListType;
+  typedef otb::ImageList<FloatImageType>                                     ImageListType;
+  typedef typename ImageListType::Pointer                                    ImageListPointer;
+  typedef ImageListToVectorImageFilter<ImageListType, FloatVectorImageType>  ListConcatenerFilterType;
+  typedef typename ListConcatenerFilterType::Pointer                         ListConcatenerFilterPointer;
+  typedef MultiToMonoChannelExtractROI<InternalPixelType, InternalPixelType> MultiToMonoChannelFilterType;
+  typedef ObjectList<MultiToMonoChannelFilterType>                           ExtractROIFilterListType;
+  typedef typename ExtractROIFilterListType::Pointer                         ExtractROIFilterListPointer;
+  typedef otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType>  ExtractFilterType;
+  typedef otb::ObjectList<FloatVectorImageType>                              FloatVectorImageListType;
 
   // Initialize the source
-  void Set(FloatVectorImageListType * inputList);
+  void
+  Set(FloatVectorImageListType * inputList);
 
   // Get the source output
-  FloatVectorImagePointerType Get();
+  FloatVectorImagePointerType
+  Get();
 
   TensorflowSource();
-  virtual ~TensorflowSource (){};
+  virtual ~TensorflowSource(){};
 
 private:
   ListConcatenerFilterPointer m_Concatener;    // Mono-images stacker
   ImageListPointer            m_List;          // List of mono-images
   ExtractROIFilterListPointer m_ExtractorList; // Mono-images extractors
-
 };
 
 } // end namespace otb
diff --git a/include/otbTensorflowSource.hxx b/include/otbTensorflowSource.hxx
index 2ad575866fda4ba7baa9dc494c44bcc38b79f0cd..2e41253c69c70e328e9d6827ee1a5f5971c716cb 100644
--- a/include/otbTensorflowSource.hxx
+++ b/include/otbTensorflowSource.hxx
@@ -21,8 +21,7 @@ namespace otb
 // Constructor
 //
 template <class TImage>
-TensorflowSource<TImage>
-::TensorflowSource()
+TensorflowSource<TImage>::TensorflowSource()
 {}
 
 //
@@ -30,40 +29,38 @@ TensorflowSource<TImage>
 //
 template <class TImage>
 void
-TensorflowSource<TImage>
-::Set(FloatVectorImageListType * inputList)
+TensorflowSource<TImage>::Set(FloatVectorImageListType * inputList)
 {
   // Create one stack for input images list
-  m_Concatener    = ListConcatenerFilterType::New();
-  m_List          = ImageListType::New();
+  m_Concatener = ListConcatenerFilterType::New();
+  m_List = ImageListType::New();
   m_ExtractorList = ExtractROIFilterListType::New();
 
   // Split each input vector image into image
   // and generate an mono channel image list
   inputList->GetNthElement(0)->UpdateOutputInformation();
   SizeType size = inputList->GetNthElement(0)->GetLargestPossibleRegion().GetSize();
-  for( unsigned int i = 0; i < inputList->Size(); i++ )
+  for (unsigned int i = 0; i < inputList->Size(); i++)
   {
     FloatVectorImagePointerType vectIm = inputList->GetNthElement(i);
     vectIm->UpdateOutputInformation();
-    if( size != vectIm->GetLargestPossibleRegion().GetSize() )
+    if (size != vectIm->GetLargestPossibleRegion().GetSize())
     {
       itkGenericExceptionMacro("Input image size number " << i << " mismatch");
     }
 
-    for( unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
+    for (unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
     {
       typename MultiToMonoChannelFilterType::Pointer extractor = MultiToMonoChannelFilterType::New();
-      extractor->SetInput( vectIm );
-      extractor->SetChannel( j+1 );
+      extractor->SetInput(vectIm);
+      extractor->SetChannel(j + 1);
       extractor->UpdateOutputInformation();
-      m_ExtractorList->PushBack( extractor );
-      m_List->PushBack( extractor->GetOutput() );
+      m_ExtractorList->PushBack(extractor);
+      m_List->PushBack(extractor->GetOutput());
     }
   }
-  m_Concatener->SetInput( m_List );
+  m_Concatener->SetInput(m_List);
   m_Concatener->UpdateOutputInformation();
-
 }
 
 //
diff --git a/include/otbTensorflowStreamerFilter.h b/include/otbTensorflowStreamerFilter.h
index 4730d3691cd3bd64954091c3bfdbb5bb7422d870..fa985d007a1040bc5325d3831724c4108316da5e 100644
--- a/include/otbTensorflowStreamerFilter.h
+++ b/include/otbTensorflowStreamerFilter.h
@@ -26,12 +26,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowStreamerFilter :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowStreamerFilter : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowStreamerFilter                           Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
@@ -51,24 +49,31 @@ public:
   typedef typename ImageType::SizeType              SizeType;
   typedef typename Superclass::InputImageRegionType RegionType;
 
-  typedef TOutputImage                             OutputImageType;
+  typedef TOutputImage OutputImageType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
 
 protected:
   TensorflowStreamerFilter();
-  virtual ~TensorflowStreamerFilter() {};
+  virtual ~TensorflowStreamerFilter(){};
 
-  virtual void UpdateOutputData(itk::DataObject *output){(void) output; this->GenerateData();}
+  virtual void
+  UpdateOutputData(itk::DataObject * output)
+  {
+    (void)output;
+    this->GenerateData();
+  }
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowStreamerFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowStreamerFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
+  SizeType m_OutputGridSize; // Output grid size
 
 }; // end class
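A hedged wiring sketch for the streamer declared above; modelFilter stands for any upstream filter whose output should be generated tile by tile:

using StreamerType = otb::TensorflowStreamerFilter<FloatVectorImageType, FloatVectorImageType>;
StreamerType::Pointer streamer = StreamerType::New();
streamer->SetInput(modelFilter->GetOutput());
StreamerType::SizeType grid;
grid[0] = 256; // tiles are forced onto this grid,
grid[1] = 256; // whatever region is requested downstream
streamer->SetOutputGridSize(grid);
streamer->Update(); // GenerateData() then walks the aligned tiles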
 
diff --git a/include/otbTensorflowStreamerFilter.hxx b/include/otbTensorflowStreamerFilter.hxx
index 59904a54f3df99048dfa383e22dad0ee7bef9784..3aa1afca538841126700484f34f9261731b75d1e 100644
--- a/include/otbTensorflowStreamerFilter.hxx
+++ b/include/otbTensorflowStreamerFilter.hxx
@@ -19,30 +19,28 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::TensorflowStreamerFilter()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::TensorflowStreamerFilter()
+{
   m_OutputGridSize.Fill(1);
- }
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   OutputImageType * outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType  outputReqRegion = outputPtr->GetRequestedRegion();
   outputPtr->SetBufferedRegion(outputReqRegion);
   outputPtr->Allocate();
 
   // Compute the aligned region
   RegionType region;
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = outputReqRegion.GetIndex(dim);
     IndexValueType upper = lower + outputReqRegion.GetSize(dim);
@@ -54,35 +52,34 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
+  }
 
   // Compute the number of subregions to process
   const unsigned int nbTilesX = region.GetSize(0) / m_OutputGridSize[0];
   const unsigned int nbTilesY = region.GetSize(1) / m_OutputGridSize[1];
 
   // Progress
-  itk::ProgressReporter progress(this, 0, nbTilesX*nbTilesY);
+  itk::ProgressReporter progress(this, 0, nbTilesX * nbTilesY);
 
   // For each tile, propagate the input region and recopy the output
-  ImageType * inputImage = static_cast<ImageType * >(  Superclass::ProcessObject::GetInput(0) );
+  ImageType *  inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   unsigned int tx, ty;
-  RegionType subRegion;
+  RegionType   subRegion;
   subRegion.SetSize(m_OutputGridSize);
   for (ty = 0; ty < nbTilesY; ty++)
   {
-    subRegion.SetIndex(1, ty*m_OutputGridSize[1] + region.GetIndex(1));
+    subRegion.SetIndex(1, ty * m_OutputGridSize[1] + region.GetIndex(1));
     for (tx = 0; tx < nbTilesX; tx++)
     {
       // Update the input subregion
-      subRegion.SetIndex(0, tx*m_OutputGridSize[0] + region.GetIndex(0));
+      subRegion.SetIndex(0, tx * m_OutputGridSize[0] + region.GetIndex(0));
 
       // The actual region to copy
       RegionType cpyRegion(subRegion);
@@ -94,12 +91,12 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
       inputImage->UpdateOutputData();
 
       // Copy the subregion to output
-      itk::ImageAlgorithm::Copy( inputImage, outputPtr, cpyRegion, cpyRegion );
+      itk::ImageAlgorithm::Copy(inputImage, outputPtr, cpyRegion, cpyRegion);
 
       progress.CompletedPixel();
     }
   }
- }
+}
 
 
 } // end namespace otb
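An illustration of the alignment performed above, assuming deltaLo and deltaUp (computed in lines elided from this hunk) are the remainders of the lower and upper corners modulo the grid size:

// Hypothetical one-dimensional equivalent of the snapping loop in GenerateData().
long AlignLower(long lower, long grid) { return lower - (lower % grid); }
long AlignUpper(long upper, long grid)
{
  const long d = upper % grid;
  return d > 0 ? upper + (grid - d) : upper;
}
// e.g. grid = 256, requested extent [300, 500): AlignLower -> 256, AlignUpper -> 512.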