From 371af4d10b312b112ca2606d03579023d60421c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Cresson?= <remi.cresson@teledetection.fr>
Date: Sat, 15 Aug 2020 18:30:47 +0200
Subject: [PATCH 01/12] DOC: mentions book

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index 64765934..f766f6f4 100644
--- a/README.md
+++ b/README.md
@@ -427,3 +427,7 @@ In the `python` folder are provided some [ready-to-use deep networks, with docum
 ## Tutorial
 
 A tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/)
+
+## Book
+
+A full tutorial is available in [this book](https://g.co/kgs/S77JPT). About 160 pages explain how to use OTBTF (with also OTB, QGIS and TensorFlow) for landcover mapping (patch-based image classification, fully convolutional models, hybrid deep networks X random forest classifiers, semantic segmentation from OSM data, image restoration with joint SAR/Optical sensors).
-- 
GitLab


From 9571b21d0b30ec9d6dfd4a1c7b49088024f3af28 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Cresson?= <remi.cresson@teledetection.fr>
Date: Sat, 15 Aug 2020 18:30:47 +0200
Subject: [PATCH 02/12] DOC: mentions book

---
 README.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
index 64765934..f766f6f4 100644
--- a/README.md
+++ b/README.md
@@ -427,3 +427,7 @@ In the `python` folder are provided some [ready-to-use deep networks, with docum
 ## Tutorial
 
 A tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/an-introduction-to-deep-learning-on-remote-sensing-images-tutorial/)
+
+## Book
+
+A full tutorial is available in [this book](https://g.co/kgs/S77JPT). About 160 pages explain how to use OTBTF (with also OTB, QGIS and TensorFlow) for landcover mapping (patch-based image classification, fully convolutional models, hybrid deep networks X random forest classifiers, semantic segmentation from OSM data, image restoration with joint SAR/Optical sensors).
-- 
GitLab


From d890f94718efeba579c16f6a4e548d55e8185728 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Cresson?= <remi.cresson@teledetection.fr>
Date: Tue, 1 Sep 2020 18:12:04 +0200
Subject: [PATCH 03/12] Update README.md

---
 README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f766f6f4..c940a651 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Available docker images:
 | **mdl4eo/otbtf1.6:cpu** | Ubuntu Xenial | r1.14  | 7.0.0 | CPU, no optimization   |
 | **mdl4eo/otbtf1.7:cpu** | Ubuntu Xenial | r1.14  | 7.0.0 | CPU, no optimization   |
 | **mdl4eo/otbtf1.7:gpu** | Ubuntu Xenial | r1.14  | 7.0.0 | GPU                    |
-| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1   | 7.1.0 | CPU, compiled with MKL |
+| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1   | 7.1.0 | CPU, no optimization   |
 | **mdl4eo/otbtf2.0:gpu** | Ubuntu Xenial | r2.1   | 7.1.0 | GPU                    |
 
 All GPU docker images are suited for **NVIDIA GPUs**. They use CUDA/CUDNN support and are built with compute capabilities 6.1, 5.2, 3.5. To change the compute capabilities, you can edit the dockerfile and build your own docker image (dockerfiles are provided in the `tools/dockerfiles/` path of the repository).
@@ -431,3 +431,7 @@ A tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/a
 ## Book
 
 A full tutorial is available in [this book](https://g.co/kgs/S77JPT). About 160 pages explain how to use OTBTF (with also OTB, QGIS and TensorFlow) for landcover mapping (patch-based image classification, fully convolutional models, hybrid deep networks X random forest classifiers, semantic segmentation from OSM data, image restoration with joint SAR/Optical sensors).
+
+## Cite
+
+Cresson, R. (2018). A framework for remote sensing images processing using deep learning techniques. IEEE Geoscience and Remote Sensing Letters, 16(1), 25-29.
-- 
GitLab


From 90d63a66e25159151d2f588f84fd41444c13cbeb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Cresson?= <remi.cresson@teledetection.fr>
Date: Tue, 1 Sep 2020 18:12:04 +0200
Subject: [PATCH 04/12] Update README.md

---
 README.md | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f766f6f4..c940a651 100644
--- a/README.md
+++ b/README.md
@@ -46,7 +46,7 @@ Available docker images:
 | **mdl4eo/otbtf1.6:cpu** | Ubuntu Xenial | r1.14  | 7.0.0 | CPU, no optimization   |
 | **mdl4eo/otbtf1.7:cpu** | Ubuntu Xenial | r1.14  | 7.0.0 | CPU, no optimization   |
 | **mdl4eo/otbtf1.7:gpu** | Ubuntu Xenial | r1.14  | 7.0.0 | GPU                    |
-| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1   | 7.1.0 | CPU, compiled with MKL |
+| **mdl4eo/otbtf2.0:cpu** | Ubuntu Xenial | r2.1   | 7.1.0 | CPU, no optimization   |
 | **mdl4eo/otbtf2.0:gpu** | Ubuntu Xenial | r2.1   | 7.1.0 | GPU                    |
 
 All GPU docker images are suited for **NVIDIA GPUs**. They use CUDA/CUDNN support and are built with compute capabilities 6.1, 5.2, 3.5. To change the compute capabilities, you can edit the dockerfile and build your own docker image (dockerfiles are provided in the `tools/dockerfiles/` path of the repository).
@@ -431,3 +431,7 @@ A tutorial is available at [MDL4EO's blog](https://mdl4eo.irstea.fr/2019/01/04/a
 ## Book
 
 A full tutorial is available in [this book](https://g.co/kgs/S77JPT). About 160 pages explain how to use OTBTF (with also OTB, QGIS and TensorFlow) for landcover mapping (patch-based image classification, fully convolutional models, hybrid deep networks X random forest classifiers, semantic segmentation from OSM data, image restoration with joint SAR/Optical sensors).
+
+## Cite
+
+Cresson, R. (2018). A framework for remote sensing images processing using deep learning techniques. IEEE Geoscience and Remote Sensing Letters, 16(1), 25-29.
-- 
GitLab


From 3a9e958c0379c073edf75136c5247be0b0c942f5 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Mon, 8 Nov 2021 14:02:08 +0100
Subject: [PATCH 05/12] STYLE: apply clang-format

---
 app/otbDensePolygonClassStatistics.cxx        | 292 +++++++-------
 app/otbImageClassifierFromDeepFeatures.cxx    |  59 +--
 app/otbLabelImageSampleSelection.cxx          | 246 ++++++------
 app/otbPatchesExtraction.cxx                  | 166 ++++----
 app/otbPatchesSelection.cxx                   | 293 ++++++++-------
 app/otbTensorflowModelServe.cxx               | 246 ++++++------
 app/otbTensorflowModelTrain.cxx               | 355 +++++++++---------
 app/otbTrainClassifierFromDeepFeatures.cxx    | 119 +++---
 include/otbTensorflowCommon.cxx               |  69 ++--
 include/otbTensorflowCommon.h                 |  45 ++-
 include/otbTensorflowCopyUtils.cxx            |  11 +-
 include/otbTensorflowCopyUtils.h              |  87 +++--
 include/otbTensorflowDataTypeBridge.cxx       |  19 +-
 include/otbTensorflowDataTypeBridge.h         |  19 +-
 include/otbTensorflowGraphOperations.cxx      |   6 +-
 include/otbTensorflowGraphOperations.h        |  25 +-
 include/otbTensorflowMultisourceModelBase.h   | 109 +++---
 include/otbTensorflowMultisourceModelBase.hxx |  78 ++--
 include/otbTensorflowMultisourceModelFilter.h |  83 ++--
 .../otbTensorflowMultisourceModelFilter.hxx   | 333 ++++++++--------
 ...tbTensorflowMultisourceModelLearningBase.h |  69 ++--
 ...TensorflowMultisourceModelLearningBase.hxx | 141 ++++---
 include/otbTensorflowMultisourceModelTrain.h  |  28 +-
 .../otbTensorflowMultisourceModelTrain.hxx    |  31 +-
 .../otbTensorflowMultisourceModelValidate.h   |  65 ++--
 .../otbTensorflowMultisourceModelValidate.hxx | 193 +++++-----
 include/otbTensorflowSampler.h                |  87 +++--
 include/otbTensorflowSampler.hxx              |  79 ++--
 include/otbTensorflowSamplingUtils.cxx        |  15 +-
 include/otbTensorflowSamplingUtils.h          |  78 ++--
 include/otbTensorflowSource.h                 |  42 +--
 include/otbTensorflowSource.hxx               |  27 +-
 include/otbTensorflowStreamerFilter.h         |  25 +-
 include/otbTensorflowStreamerFilter.hxx       |  39 +-
 34 files changed, 1872 insertions(+), 1707 deletions(-)

diff --git a/app/otbDensePolygonClassStatistics.cxx b/app/otbDensePolygonClassStatistics.cxx
index fa7c2701..1b9b53f6 100644
--- a/app/otbDensePolygonClassStatistics.cxx
+++ b/app/otbDensePolygonClassStatistics.cxx
@@ -34,10 +34,11 @@ namespace otb
 namespace Wrapper
 {
 /** Utility function to negate std::isalnum */
-bool IsNotAlphaNum(char c)
-  {
+bool
+IsNotAlphaNum(char c)
+{
   return !std::isalnum(c);
-  }
+}
 
 class DensePolygonClassStatistics : public Application
 {
@@ -53,54 +54,53 @@ public:
   itkTypeMacro(DensePolygonClassStatistics, Application);
 
   /** DataObjects typedef */
-  typedef UInt32ImageType                           LabelImageType;
-  typedef UInt8ImageType                            MaskImageType;
-  typedef VectorData<>                              VectorDataType;
+  typedef UInt32ImageType LabelImageType;
+  typedef UInt8ImageType  MaskImageType;
+  typedef VectorData<>    VectorDataType;
 
   /** ProcessObjects typedef */
-  typedef otb::VectorDataIntoImageProjectionFilter<VectorDataType,
-      FloatVectorImageType>                                                       VectorDataReprojFilterType;
+  typedef otb::VectorDataIntoImageProjectionFilter<VectorDataType, FloatVectorImageType> VectorDataReprojFilterType;
 
-  typedef otb::VectorDataToLabelImageFilter<VectorDataType, LabelImageType>       RasterizeFilterType;
+  typedef otb::VectorDataToLabelImageFilter<VectorDataType, LabelImageType> RasterizeFilterType;
 
   typedef otb::VectorImage<MaskImageType::PixelType>                              InternalMaskImageType;
   typedef otb::ImageToNoDataMaskFilter<FloatVectorImageType, MaskImageType>       NoDataMaskFilterType;
   typedef otb::ImageToVectorImageCastFilter<MaskImageType, InternalMaskImageType> CastFilterType;
 
-  typedef otb::StreamingStatisticsMapFromLabelImageFilter<InternalMaskImageType,
-      LabelImageType>                                                             StatsFilterType;
+  typedef otb::StreamingStatisticsMapFromLabelImageFilter<InternalMaskImageType, LabelImageType> StatsFilterType;
 
-  typedef otb::StatisticsXMLFileWriter<FloatVectorImageType::PixelType>           StatWriterType;
+  typedef otb::StatisticsXMLFileWriter<FloatVectorImageType::PixelType> StatWriterType;
 
-  void DoInit()
+  void
+  DoInit()
   {
     SetName("DensePolygonClassStatistics");
     SetDescription("Computes statistics on a training polygon set.");
 
     // Documentation
     SetDocLongDescription("The application processes a dense set of polygons "
-      "intended for training (they should have a field giving the associated "
-      "class). The geometries are analyzed against a support image to compute "
-      "statistics : \n"
-      "  - number of samples per class\n"
-      "  - number of samples per geometry\n");
+                          "intended for training (they should have a field giving the associated "
+                          "class). The geometries are analyzed against a support image to compute "
+                          "statistics : \n"
+                          "  - number of samples per class\n"
+                          "  - number of samples per geometry\n");
     SetDocLimitations("None");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
-    AddParameter(ParameterType_InputImage,  "in",   "Input image");
+    AddParameter(ParameterType_InputImage, "in", "Input image");
     SetParameterDescription("in", "Support image that will be classified");
-    
+
     AddParameter(ParameterType_InputVectorData, "vec", "Input vectors");
-    SetParameterDescription("vec","Input geometries to analyze");
-    
+    SetParameterDescription("vec", "Input geometries to analyze");
+
     AddParameter(ParameterType_OutputFilename, "out", "Output XML statistics file");
-    SetParameterDescription("out","Output file to store statistics (XML format)");
+    SetParameterDescription("out", "Output file to store statistics (XML format)");
 
     AddParameter(ParameterType_ListView, "field", "Field Name");
-    SetParameterDescription("field","Name of the field carrying the class number in the input vectors.");
-    SetListViewSingleSelectionMode("field",true);
+    SetParameterDescription("field", "Name of the field carrying the class number in the input vectors.");
+    SetListViewSingleSelectionMode("field", true);
 
     ElevationParametersHandler::AddElevationParameters(this, "elev");
 
@@ -110,158 +110,154 @@ public:
     SetDocExampleParameterValue("in", "support_image.tif");
     SetDocExampleParameterValue("vec", "variousVectors.shp");
     SetDocExampleParameterValue("field", "label");
-    SetDocExampleParameterValue("out","polygonStat.xml");
-
+    SetDocExampleParameterValue("out", "polygonStat.xml");
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
 
-  // Retrieve the field name
-  std::vector<int> selectedCFieldIdx = GetSelectedItems("field");
+    // Retrieve the field name
+    std::vector<int> selectedCFieldIdx = GetSelectedItems("field");
 
-  if(selectedCFieldIdx.empty())
+    if (selectedCFieldIdx.empty())
     {
-    otbAppLogFATAL(<<"No field has been selected for data labelling!");
+      otbAppLogFATAL(<< "No field has been selected for data labelling!");
     }
 
-  std::vector<std::string> cFieldNames = GetChoiceNames("field");  
-  std::string fieldName = cFieldNames[selectedCFieldIdx.front()];
-
-  otb::Wrapper::ElevationParametersHandler::SetupDEMHandlerFromElevationParameters(this,"elev");
-
-  // Get inputs
-  FloatVectorImageType::Pointer xs = GetParameterImage("in");
-  VectorDataType* shp = GetParameterVectorData("vec");
-
-  // Reproject vector data
-  m_VectorDataReprojectionFilter = VectorDataReprojFilterType::New();
-  m_VectorDataReprojectionFilter->SetInputVectorData(shp);
-  m_VectorDataReprojectionFilter->SetInputImage(xs);
-  m_VectorDataReprojectionFilter->Update();
-
-  // Internal no-data value
-  const LabelImageType::ValueType intNoData =
-      itk::NumericTraits<LabelImageType::ValueType>::max();
-
-  // Rasterize vector data (geometry ID)
-  m_RasterizeFIDFilter = RasterizeFilterType::New();
-  m_RasterizeFIDFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-  m_RasterizeFIDFilter->SetOutputOrigin(xs->GetOrigin());
-  m_RasterizeFIDFilter->SetOutputSpacing(xs->GetSignedSpacing());
-  m_RasterizeFIDFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-  m_RasterizeFIDFilter->SetBurnAttribute("________"); // Trick to get the polygon ID
-  m_RasterizeFIDFilter->SetGlobalWarningDisplay(false);
-  m_RasterizeFIDFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-  m_RasterizeFIDFilter->SetBackgroundValue(intNoData);
-  m_RasterizeFIDFilter->SetDefaultBurnValue(0);
-
-  // Rasterize vector data (geometry class)
-  m_RasterizeClassFilter = RasterizeFilterType::New();
-  m_RasterizeClassFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-  m_RasterizeClassFilter->SetOutputOrigin(xs->GetOrigin());
-  m_RasterizeClassFilter->SetOutputSpacing(xs->GetSignedSpacing());
-  m_RasterizeClassFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-  m_RasterizeClassFilter->SetBurnAttribute(fieldName);
-  m_RasterizeClassFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-  m_RasterizeClassFilter->SetBackgroundValue(intNoData);
-  m_RasterizeClassFilter->SetDefaultBurnValue(0);
-
-  // No data mask
-  m_NoDataFilter = NoDataMaskFilterType::New();
-  m_NoDataFilter->SetInput(xs);
-  m_NoDataCastFilter = CastFilterType::New();
-  m_NoDataCastFilter->SetInput(m_NoDataFilter->GetOutput());
-
-  // Stats (geometry ID)
-  m_FIDStatsFilter = StatsFilterType::New();
-  m_FIDStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-  m_FIDStatsFilter->SetInputLabelImage(m_RasterizeFIDFilter->GetOutput());
-  m_FIDStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-  AddProcess(m_FIDStatsFilter->GetStreamer(), "Computing number of samples per vector");
-  m_FIDStatsFilter->Update();
-
-  // Stats (geometry class)
-  m_ClassStatsFilter = StatsFilterType::New();
-  m_ClassStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-  m_ClassStatsFilter->SetInputLabelImage(m_RasterizeClassFilter->GetOutput());
-  m_ClassStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-  AddProcess(m_ClassStatsFilter->GetStreamer(), "Computing number of samples per class");
-  m_ClassStatsFilter->Update();
-
-  // Remove the no-data entries
-  StatsFilterType::LabelPopulationMapType fidMap = m_FIDStatsFilter->GetLabelPopulationMap();
-  StatsFilterType::LabelPopulationMapType classMap = m_ClassStatsFilter->GetLabelPopulationMap();
-  fidMap.erase(intNoData);
-  classMap.erase(intNoData);
-
-  m_StatWriter = StatWriterType::New();
-  m_StatWriter->SetFileName(this->GetParameterString("out"));
-  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerClass", classMap);
-  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerVector", fidMap);
-  m_StatWriter->Update();
-
+    std::vector<std::string> cFieldNames = GetChoiceNames("field");
+    std::string              fieldName = cFieldNames[selectedCFieldIdx.front()];
+
+    otb::Wrapper::ElevationParametersHandler::SetupDEMHandlerFromElevationParameters(this, "elev");
+
+    // Get inputs
+    FloatVectorImageType::Pointer xs = GetParameterImage("in");
+    VectorDataType *              shp = GetParameterVectorData("vec");
+
+    // Reproject vector data
+    m_VectorDataReprojectionFilter = VectorDataReprojFilterType::New();
+    m_VectorDataReprojectionFilter->SetInputVectorData(shp);
+    m_VectorDataReprojectionFilter->SetInputImage(xs);
+    m_VectorDataReprojectionFilter->Update();
+
+    // Internal no-data value
+    const LabelImageType::ValueType intNoData = itk::NumericTraits<LabelImageType::ValueType>::max();
+
+    // Rasterize vector data (geometry ID)
+    m_RasterizeFIDFilter = RasterizeFilterType::New();
+    m_RasterizeFIDFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
+    m_RasterizeFIDFilter->SetOutputOrigin(xs->GetOrigin());
+    m_RasterizeFIDFilter->SetOutputSpacing(xs->GetSignedSpacing());
+    m_RasterizeFIDFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
+    m_RasterizeFIDFilter->SetBurnAttribute("________"); // Trick to get the polygon ID
+    m_RasterizeFIDFilter->SetGlobalWarningDisplay(false);
+    m_RasterizeFIDFilter->SetOutputProjectionRef(xs->GetProjectionRef());
+    m_RasterizeFIDFilter->SetBackgroundValue(intNoData);
+    m_RasterizeFIDFilter->SetDefaultBurnValue(0);
+
+    // Rasterize vector data (geometry class)
+    m_RasterizeClassFilter = RasterizeFilterType::New();
+    m_RasterizeClassFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
+    m_RasterizeClassFilter->SetOutputOrigin(xs->GetOrigin());
+    m_RasterizeClassFilter->SetOutputSpacing(xs->GetSignedSpacing());
+    m_RasterizeClassFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
+    m_RasterizeClassFilter->SetBurnAttribute(fieldName);
+    m_RasterizeClassFilter->SetOutputProjectionRef(xs->GetProjectionRef());
+    m_RasterizeClassFilter->SetBackgroundValue(intNoData);
+    m_RasterizeClassFilter->SetDefaultBurnValue(0);
+
+    // No data mask
+    m_NoDataFilter = NoDataMaskFilterType::New();
+    m_NoDataFilter->SetInput(xs);
+    m_NoDataCastFilter = CastFilterType::New();
+    m_NoDataCastFilter->SetInput(m_NoDataFilter->GetOutput());
+
+    // Stats (geometry ID)
+    m_FIDStatsFilter = StatsFilterType::New();
+    m_FIDStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
+    m_FIDStatsFilter->SetInputLabelImage(m_RasterizeFIDFilter->GetOutput());
+    m_FIDStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
+    AddProcess(m_FIDStatsFilter->GetStreamer(), "Computing number of samples per vector");
+    m_FIDStatsFilter->Update();
+
+    // Stats (geometry class)
+    m_ClassStatsFilter = StatsFilterType::New();
+    m_ClassStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
+    m_ClassStatsFilter->SetInputLabelImage(m_RasterizeClassFilter->GetOutput());
+    m_ClassStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
+    AddProcess(m_ClassStatsFilter->GetStreamer(), "Computing number of samples per class");
+    m_ClassStatsFilter->Update();
+
+    // Remove the no-data entries
+    StatsFilterType::LabelPopulationMapType fidMap = m_FIDStatsFilter->GetLabelPopulationMap();
+    StatsFilterType::LabelPopulationMapType classMap = m_ClassStatsFilter->GetLabelPopulationMap();
+    fidMap.erase(intNoData);
+    classMap.erase(intNoData);
+
+    m_StatWriter = StatWriterType::New();
+    m_StatWriter->SetFileName(this->GetParameterString("out"));
+    m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerClass", classMap);
+    m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerVector", fidMap);
+    m_StatWriter->Update();
   }
 
-  void DoUpdateParameters()
+  void
+  DoUpdateParameters()
   {
-     if (HasValue("vec"))
-      {
-      std::string vectorFile = GetParameterString("vec");
-      ogr::DataSource::Pointer ogrDS =
-        ogr::DataSource::New(vectorFile, ogr::DataSource::Modes::Read);
-      ogr::Layer layer = ogrDS->GetLayer(0);
-      ogr::Feature feature = layer.ogr().GetNextFeature();
+    if (HasValue("vec"))
+    {
+      std::string              vectorFile = GetParameterString("vec");
+      ogr::DataSource::Pointer ogrDS = ogr::DataSource::New(vectorFile, ogr::DataSource::Modes::Read);
+      ogr::Layer               layer = ogrDS->GetLayer(0);
+      ogr::Feature             feature = layer.ogr().GetNextFeature();
 
       ClearChoices("field");
 
-      for(int iField=0; iField<feature.ogr().GetFieldCount(); iField++)
-        {
+      for (int iField = 0; iField < feature.ogr().GetFieldCount(); iField++)
+      {
         std::string key, item = feature.ogr().GetFieldDefnRef(iField)->GetNameRef();
         key = item;
-        std::string::iterator end = std::remove_if(key.begin(),key.end(),IsNotAlphaNum);
+        std::string::iterator end = std::remove_if(key.begin(), key.end(), IsNotAlphaNum);
         std::transform(key.begin(), end, key.begin(), tolower);
 
         OGRFieldType fieldType = feature.ogr().GetFieldDefnRef(iField)->GetType();
 
-        if(fieldType == OFTString || fieldType == OFTInteger || fieldType == OFTInteger64)
-          {
-          std::string tmpKey="field."+key.substr(0, end - key.begin());
-          AddChoice(tmpKey,item);
-          }
+        if (fieldType == OFTString || fieldType == OFTInteger || fieldType == OFTInteger64)
+        {
+          std::string tmpKey = "field." + key.substr(0, end - key.begin());
+          AddChoice(tmpKey, item);
         }
       }
+    }
 
-     // Check that the extension of the output parameter is XML (mandatory for
-     // StatisticsXMLFileWriter)
-     // Check it here to trigger the error before polygons analysis
-
-     if (HasValue("out"))
-       {
-       // Store filename extension
-       // Check that the right extension is given : expected .xml
-       const std::string extension = itksys::SystemTools::GetFilenameLastExtension(this->GetParameterString("out"));
-
-       if (itksys::SystemTools::LowerCase(extension) != ".xml")
-         {
-         otbAppLogFATAL( << extension << " is a wrong extension for parameter \"out\": Expected .xml" );
-         }
-       }
-  }
+    // Check that the extension of the output parameter is XML (mandatory for
+    // StatisticsXMLFileWriter)
+    // Check it here to trigger the error before polygons analysis
+
+    if (HasValue("out"))
+    {
+      // Store filename extension
+      // Check that the right extension is given : expected .xml
+      const std::string extension = itksys::SystemTools::GetFilenameLastExtension(this->GetParameterString("out"));
 
+      if (itksys::SystemTools::LowerCase(extension) != ".xml")
+      {
+        otbAppLogFATAL(<< extension << " is a wrong extension for parameter \"out\": Expected .xml");
+      }
+    }
+  }
 
 
 private:
   // Filters
   VectorDataReprojFilterType::Pointer m_VectorDataReprojectionFilter;
-  RasterizeFilterType::Pointer m_RasterizeFIDFilter;
-  RasterizeFilterType::Pointer m_RasterizeClassFilter;
-  NoDataMaskFilterType::Pointer m_NoDataFilter;
-  CastFilterType::Pointer m_NoDataCastFilter;
-  StatsFilterType::Pointer m_FIDStatsFilter;
-  StatsFilterType::Pointer m_ClassStatsFilter;
-  StatWriterType::Pointer m_StatWriter;
-
+  RasterizeFilterType::Pointer        m_RasterizeFIDFilter;
+  RasterizeFilterType::Pointer        m_RasterizeClassFilter;
+  NoDataMaskFilterType::Pointer       m_NoDataFilter;
+  CastFilterType::Pointer             m_NoDataCastFilter;
+  StatsFilterType::Pointer            m_FIDStatsFilter;
+  StatsFilterType::Pointer            m_ClassStatsFilter;
+  StatWriterType::Pointer             m_StatWriter;
 };
 
 } // end of namespace Wrapper
diff --git a/app/otbImageClassifierFromDeepFeatures.cxx b/app/otbImageClassifierFromDeepFeatures.cxx
index f3ffd273..3760f587 100644
--- a/app/otbImageClassifierFromDeepFeatures.cxx
+++ b/app/otbImageClassifierFromDeepFeatures.cxx
@@ -34,23 +34,23 @@ class ImageClassifierFromDeepFeatures : public CompositeApplication
 {
 public:
   /** Standard class typedefs. */
-  typedef ImageClassifierFromDeepFeatures              Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
+  typedef ImageClassifierFromDeepFeatures Self;
+  typedef Application                     Superclass;
+  typedef itk::SmartPointer<Self>         Pointer;
+  typedef itk::SmartPointer<const Self>   ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(ImageClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
 
 private:
-
   //
   // Add an input source, which includes:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void AddAnInputImage(int inputNumber = 0)
+  void
+  AddAnInputImage(int inputNumber = 0)
   {
     inputNumber++;
 
@@ -64,7 +64,8 @@ private:
   }
 
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     SetName("ImageClassifierFromDeepFeatures");
@@ -81,48 +82,48 @@ private:
     ClearApplications();
 
     // Add applications
-    AddApplication("ImageClassifier",      "classif", "Images classifier"  );
-    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model" );
+    AddApplication("ImageClassifier", "classif", "Images classifier");
+    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model");
 
     // Model shared parameters
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
+    for (int i = 1; i < tf::GetNumberOfSources(); i++)
     {
       AddAnInputImage(i);
     }
-    ShareParameter("deepmodel",  "tfmodel.model",
-        "Deep net model parameters",      "Deep net model parameters");
-    ShareParameter("output",     "tfmodel.output",
-        "Deep net outputs parameters",
-        "Deep net outputs parameters");
-    ShareParameter("optim", "tfmodel.optim",
-        "This group of parameters allows optimization of processing time",
-        "This group of parameters allows optimization of processing time");
+    ShareParameter("deepmodel", "tfmodel.model", "Deep net model parameters", "Deep net model parameters");
+    ShareParameter("output", "tfmodel.output", "Deep net outputs parameters", "Deep net outputs parameters");
+    ShareParameter("optim",
+                   "tfmodel.optim",
+                   "This group of parameters allows optimization of processing time",
+                   "This group of parameters allows optimization of processing time");
 
     // Classify shared parameters
-    ShareParameter("model"      , "classif.model"      , "Model file"          , "Model file"          );
-    ShareParameter("imstat"     , "classif.imstat"     , "Statistics file"     , "Statistics file"     );
-    ShareParameter("nodatalabel", "classif.nodatalabel", "Label mask value"    , "Label mask value"    );
-    ShareParameter("out"        , "classif.out"        , "Output image"        , "Output image"        );
-    ShareParameter("confmap"    , "classif.confmap"    , "Confidence map image", "Confidence map image");
-    ShareParameter("ram"        , "classif.ram"        , "Ram"                 , "Ram"                 );
+    ShareParameter("model", "classif.model", "Model file", "Model file");
+    ShareParameter("imstat", "classif.imstat", "Statistics file", "Statistics file");
+    ShareParameter("nodatalabel", "classif.nodatalabel", "Label mask value", "Label mask value");
+    ShareParameter("out", "classif.out", "Output image", "Output image");
+    ShareParameter("confmap", "classif.confmap", "Confidence map image", "Confidence map image");
+    ShareParameter("ram", "classif.ram", "Ram", "Ram");
   }
 
-  void DoUpdateParameters()
+  void
+  DoUpdateParameters()
   {
     UpdateInternalParameters("classif");
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
     ExecuteInternal("tfmodel");
-    GetInternalApplication("classif")->SetParameterInputImage("in", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
+    GetInternalApplication("classif")->SetParameterInputImage(
+      "in", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
     UpdateInternalParameters("classif");
     ExecuteInternal("classif");
   }
-
 };
 } // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT( otb::Wrapper::ImageClassifierFromDeepFeatures )
+OTB_APPLICATION_EXPORT(otb::Wrapper::ImageClassifierFromDeepFeatures)
diff --git a/app/otbLabelImageSampleSelection.cxx b/app/otbLabelImageSampleSelection.cxx
index 50396fa0..f0d2c03d 100644
--- a/app/otbLabelImageSampleSelection.cxx
+++ b/app/otbLabelImageSampleSelection.cxx
@@ -35,59 +35,62 @@ class LabelImageSampleSelection : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef LabelImageSampleSelection           Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
+  typedef LabelImageSampleSelection     Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(LabelImageSampleSelection, Application);
 
   /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType                 DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
-  typedef VectorDataType::DataNodeType                 DataNodeType;
-  typedef DataNodeType::Pointer                        DataNodePointer;
+  typedef VectorDataType::DataTreeType            DataTreeType;
+  typedef itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
+  typedef VectorDataType::DataNodeType            DataNodeType;
+  typedef DataNodeType::Pointer                   DataNodePointer;
 
   /** typedefs */
-  typedef Int16ImageType                               LabelImageType;
-  typedef unsigned int                                 IndexValueType;
+  typedef Int16ImageType LabelImageType;
+  typedef unsigned int   IndexValueType;
 
-  void DoUpdateParameters()
-  {
-  }
+  void
+  DoUpdateParameters()
+  {}
 
   /*
    * Display the percentage
    */
-  void ShowProgress(unsigned int count, unsigned int total, unsigned int step = 1000)
+  void
+  ShowProgress(unsigned int count, unsigned int total, unsigned int step = 1000)
   {
     if (count % step == 0)
     {
-      std::cout << std::setprecision(3) << "\r" << (100.0 * count / (float) total) << "%      " << std::flush;
+      std::cout << std::setprecision(3) << "\r" << (100.0 * count / (float)total) << "%      " << std::flush;
     }
   }
 
-  void ShowProgressDone()
+  void
+  ShowProgressDone()
   {
     std::cout << "\rDone      " << std::flush;
     std::cout << std::endl;
   }
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     // Documentation
     SetName("LabelImageSampleSelection");
     SetDescription("This application extracts points from an input label image. "
-        "This application is like \"SampleSelection\", but uses an input label "
-        "image, rather than an input vector data.");
+                   "This application is like \"SampleSelection\", but uses an input label "
+                   "image, rather than an input vector data.");
     SetDocLongDescription("This application produces a vector data containing "
-        "a set of points centered on the pixels of the input label image. "
-        "The user can control the number of points. The default strategy consists "
-        "in producing the same number of points in each class. If one class has a "
-        "smaller number of points than requested, this one is adjusted.");
+                          "a set of points centered on the pixels of the input label image. "
+                          "The user can control the number of points. The default strategy consists "
+                          "in producing the same number of points in each class. If one class has a "
+                          "smaller number of points than requested, this one is adjusted.");
 
     SetDocAuthors("Remi Cresson");
 
@@ -97,39 +100,41 @@ public:
     // Strategy
     AddParameter(ParameterType_Choice, "strategy", "Sampling strategy");
 
-    AddChoice("strategy.constant","Set the same samples counts for all classes");
-    SetParameterDescription("strategy.constant","Set the same samples counts for all classes");
+    AddChoice("strategy.constant", "Set the same samples counts for all classes");
+    SetParameterDescription("strategy.constant", "Set the same samples counts for all classes");
 
     AddParameter(ParameterType_Int, "strategy.constant.nb", "Number of samples for all classes");
     SetParameterDescription("strategy.constant.nb", "Number of samples for all classes");
-    SetMinimumParameterIntValue("strategy.constant.nb",1);
-    SetDefaultParameterInt("strategy.constant.nb",1000);
+    SetMinimumParameterIntValue("strategy.constant.nb", 1);
+    SetDefaultParameterInt("strategy.constant.nb", 1000);
 
-    AddChoice("strategy.total","Set the total number of samples to generate, and use class proportions.");
-    SetParameterDescription("strategy.total","Set the total number of samples to generate, and use class proportions.");
-    AddParameter(ParameterType_Int,"strategy.total.v","The number of samples to generate");
-    SetParameterDescription("strategy.total.v","The number of samples to generate");
-    SetMinimumParameterIntValue("strategy.total.v",1);
-    SetDefaultParameterInt("strategy.total.v",1000);
+    AddChoice("strategy.total", "Set the total number of samples to generate, and use class proportions.");
+    SetParameterDescription("strategy.total",
+                            "Set the total number of samples to generate, and use class proportions.");
+    AddParameter(ParameterType_Int, "strategy.total.v", "The number of samples to generate");
+    SetParameterDescription("strategy.total.v", "The number of samples to generate");
+    SetMinimumParameterIntValue("strategy.total.v", 1);
+    SetDefaultParameterInt("strategy.total.v", 1000);
 
-    AddChoice("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
-    SetParameterDescription("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
+    AddChoice("strategy.smallest", "Set same number of samples for all classes, with the smallest class fully sampled");
+    SetParameterDescription("strategy.smallest",
+                            "Set same number of samples for all classes, with the smallest class fully sampled");
 
-    AddChoice("strategy.all","Take all samples");
-    SetParameterDescription("strategy.all","Take all samples");
+    AddChoice("strategy.all", "Take all samples");
+    SetParameterDescription("strategy.all", "Take all samples");
 
     // Default strategy : smallest
-    SetParameterString("strategy","constant");
+    SetParameterString("strategy", "constant");
 
     // Input no-data value
     AddParameter(ParameterType_Int, "nodata", "nodata value");
-    MandatoryOn                    ("nodata");
-    SetDefaultParameterInt         ("nodata", -1);
+    MandatoryOn("nodata");
+    SetDefaultParameterInt("nodata", -1);
 
     // Padding
     AddParameter(ParameterType_Int, "pad", "padding, in pixels");
-    SetDefaultParameterInt         ("pad", 0);
-    MandatoryOff                   ("pad");
+    SetDefaultParameterInt("pad", 0);
+    MandatoryOff("pad");
 
     // Output points
     AddParameter(ParameterType_OutputVectorData, "outvec", "output set of points");
@@ -139,19 +144,20 @@ public:
     SetDocExampleParameterValue("outvec", "terrain_truth_points_sel.sqlite");
 
     AddRAMParameter();
-
   }
 
 
-  void DoExecute()
+  void
+  DoExecute()
   {
 
     // Count the number of pixels in each class
     const LabelImageType::InternalPixelType MAX_NB_OF_CLASSES =
-        itk::NumericTraits<LabelImageType::InternalPixelType>::max();;
+      itk::NumericTraits<LabelImageType::InternalPixelType>::max();
+    ;
     LabelImageType::InternalPixelType class_begin = MAX_NB_OF_CLASSES;
     LabelImageType::InternalPixelType class_end = 0;
-    vnl_vector<IndexValueType> tmp_number_of_samples(MAX_NB_OF_CLASSES, 0);
+    vnl_vector<IndexValueType>        tmp_number_of_samples(MAX_NB_OF_CLASSES, 0);
 
     otbAppLogINFO("Computing number of pixels in each class");
 
@@ -161,10 +167,10 @@ public:
     m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
 
     // We pad the image, if this is requested by the user
-    LabelImageType::Pointer inputImage = GetParameterInt16Image("inref");
+    LabelImageType::Pointer    inputImage = GetParameterInt16Image("inref");
     LabelImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
     entireRegion.ShrinkByRadius(GetParameterInt("pad"));
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
+    m_StreamingManager->PrepareStreaming(inputImage, entireRegion);
 
     // Get nodata value
     const LabelImageType::InternalPixelType nodata = GetParameterInt("nodata");
@@ -175,7 +181,7 @@ public:
     {
       LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
+      itk::ImageRegionConstIterator<LabelImageType> inIt(inputImage, streamRegion);
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         LabelImageType::InternalPixelType pixVal = inIt.Get();
@@ -204,14 +210,13 @@ public:
     // Number of samples in each class (target)
     vnl_vector<IndexValueType> target_number_of_samples(number_of_classes, 0);
 
-    otbAppLogINFO( "Number of classes: " << number_of_classes <<
-        " starting from " << class_begin <<
-        " to " << class_end << " (no-data is " << nodata << ")");
-    otbAppLogINFO( "Number of pixels in each class: " << number_of_samples );
+    otbAppLogINFO("Number of classes: " << number_of_classes << " starting from " << class_begin << " to " << class_end
+                                        << " (no-data is " << nodata << ")");
+    otbAppLogINFO("Number of pixels in each class: " << number_of_samples);
 
     // Check the smallest number of samples amongst classes
     IndexValueType min_elem_in_class = itk::NumericTraits<IndexValueType>::max();
-    for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
+    for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
       min_elem_in_class = std::min(min_elem_in_class, number_of_samples[classIdx]);
 
     // If one class is empty, throw an error
@@ -226,79 +231,73 @@ public:
     // Compute the sampling step for each classes, depending on the chosen strategy
     switch (this->GetParameterInt("strategy"))
     {
-    // constant
-    case 0:
-    {
-      // Set the target number of samples in each class
-      target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
-
-      // re adjust the number of samples to select in each class
-      if (min_elem_in_class < target_number_of_samples[0])
-      {
-        otbAppLogWARNING("Smallest class has " << min_elem_in_class <<
-            " samples but a number of " << target_number_of_samples[0] <<
-            " is given. Using " << min_elem_in_class);
-        target_number_of_samples.fill( min_elem_in_class );
-      }
+      // constant
+      case 0: {
+        // Set the target number of samples in each class
+        target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
 
-      // Compute the sampling step
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
-    }
-    break;
+        // re adjust the number of samples to select in each class
+        if (min_elem_in_class < target_number_of_samples[0])
+        {
+          otbAppLogWARNING("Smallest class has " << min_elem_in_class << " samples but a number of "
+                                                 << target_number_of_samples[0] << " is given. Using "
+                                                 << min_elem_in_class);
+          target_number_of_samples.fill(min_elem_in_class);
+        }
 
-    // total
-    case 1:
-    {
-      // Compute the sampling step
-      IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
-      if (step == 0)
-      {
-        otbAppLogWARNING("The number of samples available is smaller than the required number of samples. " <<
-            "Setting sampling step to 1.");
-        step = 1;
+        // Compute the sampling step
+        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
+          step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
       }
-      step_for_class.fill(step);
-
-      // Compute the target number of samples
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
+      break;
 
-    }
-    break;
+      // total
+      case 1: {
+        // Compute the sampling step
+        IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
+        if (step == 0)
+        {
+          otbAppLogWARNING("The number of samples available is smaller than the required number of samples. "
+                           << "Setting sampling step to 1.");
+          step = 1;
+        }
+        step_for_class.fill(step);
 
-    // smallest
-    case 2:
-    {
-      // Set the target number of samples to the smallest class
-      target_number_of_samples.fill( min_elem_in_class );
+        // Compute the target number of samples
+        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
+          target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
+      }
+      break;
 
-      // Compute the sampling step
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
+      // smallest
+      case 2: {
+        // Set the target number of samples to the smallest class
+        target_number_of_samples.fill(min_elem_in_class);
 
-    }
-    break;
+        // Compute the sampling step
+        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
+          step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
+      }
+      break;
 
-    // All
-    case 3:
-    {
-      // Easy
-      step_for_class.fill(1);
-      target_number_of_samples = number_of_samples;
-    }
-    break;
-    default:
-      otbAppLogFATAL("Strategy mode unknown :"<<this->GetParameterString("strategy"));
+      // All
+      case 3: {
+        // Easy
+        step_for_class.fill(1);
+        target_number_of_samples = number_of_samples;
+      }
       break;
+      default:
+        otbAppLogFATAL("Strategy mode unknown :" << this->GetParameterString("strategy"));
+        break;
     }
 
     // Print quick summary
     otbAppLogINFO("Sampling summary:");
     otbAppLogINFO("\tClass\tStep\tTot");
-    for (LabelImageType::InternalPixelType i = 0 ; i < number_of_classes ; i++)
+    for (LabelImageType::InternalPixelType i = 0; i < number_of_classes; i++)
     {
-      vnl_vector<int> tmp (3,0);
+      vnl_vector<int> tmp(3, 0);
       tmp[0] = i + class_begin;
       tmp[1] = step_for_class[i];
       tmp[2] = target_number_of_samples[i];
@@ -309,8 +308,8 @@ public:
     // TODO: how to pre-allocate the datatree?
     m_OutVectorData = VectorDataType::New();
     DataTreeType::Pointer tree = m_OutVectorData->GetDataTree();
-    DataNodePointer root = tree->GetRoot()->Get();
-    DataNodePointer document = DataNodeType::New();
+    DataNodePointer       root = tree->GetRoot()->Get();
+    DataNodePointer       document = DataNodeType::New();
     document->SetNodeType(DOCUMENT);
     tree->Add(document, root);
 
@@ -322,15 +321,15 @@ public:
     // Second iteration, to prepare the samples
     vnl_vector<IndexValueType> sampledCount(number_of_classes, 0);
     vnl_vector<IndexValueType> iteratorCount(number_of_classes, 0);
-    IndexValueType n_tot = 0;
-    const IndexValueType target_n_tot = target_number_of_samples.sum();
+    IndexValueType             n_tot = 0;
+    const IndexValueType       target_n_tot = target_number_of_samples.sum();
     for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
     {
       LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
+      itk::ImageRegionConstIterator<LabelImageType> inIt(inputImage, streamRegion);
 
-      for (inIt.GoToBegin() ; !inIt.IsAtEnd() ; ++inIt)
+      for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         LabelImageType::InternalPixelType classVal = inIt.Get();
 
@@ -342,7 +341,7 @@ public:
           iteratorCount[classVal]++;
 
           // Every Xi samples (Xi is the step for class i)
-          if (iteratorCount[classVal] % ((int) step_for_class[classVal]) == 0 &&
+          if (iteratorCount[classVal] % ((int)step_for_class[classVal]) == 0 &&
               sampledCount[classVal] < target_number_of_samples[classVal])
           {
             // Add this sample
@@ -366,15 +365,14 @@ public:
           } // sample this one
         }
       } // next pixel
-    } // next streaming region
+    }   // next streaming region
     ShowProgressDone();
 
-    otbAppLogINFO( "Number of samples in each class: " << sampledCount );
+    otbAppLogINFO("Number of samples in each class: " << sampledCount);
 
-    otbAppLogINFO( "Writing output vector data");
+    otbAppLogINFO("Writing output vector data");
 
     SetParameterOutputVectorData("outvec", m_OutVectorData);
-
   }
 
 private:
@@ -382,7 +380,7 @@ private:
 
 }; // end of class
 
-} // end namespace wrapper
+} // namespace Wrapper
 } // end namespace otb
 
 OTB_APPLICATION_EXPORT(otb::Wrapper::LabelImageSampleSelection)
diff --git a/app/otbPatchesExtraction.cxx b/app/otbPatchesExtraction.cxx
index 7b0ce456..bcea8b90 100644
--- a/app/otbPatchesExtraction.cxx
+++ b/app/otbPatchesExtraction.cxx
@@ -33,10 +33,10 @@ class PatchesExtraction : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef PatchesExtraction                   Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
+  typedef PatchesExtraction             Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -46,21 +46,21 @@ public:
   typedef otb::TensorflowSampler<FloatVectorImageType, VectorDataType> SamplerType;
 
   /** Typedefs for image concatenation */
-  typedef TensorflowSource<FloatVectorImageType>                       TFSourceType;
+  typedef TensorflowSource<FloatVectorImageType> TFSourceType;
 
   //
   // Store stuff related to one source
   //
   struct SourceBundle
   {
-    TFSourceType                       m_ImageSource;   // Image source
-    FloatVectorImageType::SizeType     m_PatchSize;          // Patch size
+    TFSourceType                   m_ImageSource; // Image source
+    FloatVectorImageType::SizeType m_PatchSize;   // Patch size
 
-    std::string                        m_KeyIn;   // Key of input image list
-    std::string                        m_KeyOut;  // Key of output samples image
-    std::string                        m_KeyPszX; // Key for samples sizes X
-    std::string                        m_KeyPszY; // Key for samples sizes Y
-    std::string                        m_KeyNoData; // Key for no-data value
+    std::string m_KeyIn;     // Key of input image list
+    std::string m_KeyOut;    // Key of output samples image
+    std::string m_KeyPszX;   // Key for samples sizes X
+    std::string m_KeyPszY;   // Key for samples sizes Y
+    std::string m_KeyNoData; // Key for no-data value
 
     FloatVectorImageType::InternalPixelType m_NoDataValue; // No data value
   };
@@ -72,56 +72,57 @@ public:
   // -an output image (samples)
   // -an input patchsize (dimensions of samples)
   //
-  void AddAnInputImage()
+  void
+  AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-    std::stringstream ss_group_key, ss_desc_group, ss_key_in, ss_key_out, ss_desc_in,
-    ss_desc_out, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y, ss_key_nodata, ss_desc_nodata;
-    ss_group_key   << "source"                    << inputNumber;
-    ss_desc_group  << "Parameters for source "    << inputNumber;
-    ss_key_out     << ss_group_key.str()          << ".out";
-    ss_desc_out    << "Output patches for image " << inputNumber;
-    ss_key_in      << ss_group_key.str()          << ".il";
-    ss_desc_in     << "Input image(s) "           << inputNumber;
-    ss_key_dims_x  << ss_group_key.str()          << ".patchsizex";
-    ss_desc_dims_x << "X patch size for image "   << inputNumber;
-    ss_key_dims_y  << ss_group_key.str()          << ".patchsizey";
-    ss_desc_dims_y << "Y patch size for image "   << inputNumber;
-    ss_key_nodata  << ss_group_key.str()          << ".nodata";
-    ss_desc_nodata << "No-data value for image "   << inputNumber << "(used only if \"usenodata\" is on)";
+    std::stringstream ss_group_key, ss_desc_group, ss_key_in, ss_key_out, ss_desc_in, ss_desc_out, ss_key_dims_x,
+      ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y, ss_key_nodata, ss_desc_nodata;
+    ss_group_key << "source" << inputNumber;
+    ss_desc_group << "Parameters for source " << inputNumber;
+    ss_key_out << ss_group_key.str() << ".out";
+    ss_desc_out << "Output patches for image " << inputNumber;
+    ss_key_in << ss_group_key.str() << ".il";
+    ss_desc_in << "Input image(s) " << inputNumber;
+    ss_key_dims_x << ss_group_key.str() << ".patchsizex";
+    ss_desc_dims_x << "X patch size for image " << inputNumber;
+    ss_key_dims_y << ss_group_key.str() << ".patchsizey";
+    ss_desc_dims_y << "Y patch size for image " << inputNumber;
+    ss_key_nodata << ss_group_key.str() << ".nodata";
+    ss_desc_nodata << "No-data value for image " << inputNumber << "(used only if \"usenodata\" is on)";
 
     // Populate group
-    AddParameter(ParameterType_Group,          ss_group_key.str(),  ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(),     ss_desc_in.str() );
-    AddParameter(ParameterType_OutputImage,    ss_key_out.str(),    ss_desc_out.str());
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_Float,          ss_key_nodata.str(), ss_desc_nodata.str());
-    SetDefaultParameterFloat                  (ss_key_nodata.str(), 0);
+    AddParameter(ParameterType_Group, ss_group_key.str(), ss_desc_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_in.str(), ss_desc_in.str());
+    AddParameter(ParameterType_OutputImage, ss_key_out.str(), ss_desc_out.str());
+    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
+    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
+    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
+    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
+    AddParameter(ParameterType_Float, ss_key_nodata.str(), ss_desc_nodata.str());
+    SetDefaultParameterFloat(ss_key_nodata.str(), 0);
 
     // Add a new bundle
     SourceBundle bundle;
-    bundle.m_KeyIn   = ss_key_in.str();
-    bundle.m_KeyOut  = ss_key_out.str();
+    bundle.m_KeyIn = ss_key_in.str();
+    bundle.m_KeyOut = ss_key_out.str();
     bundle.m_KeyPszX = ss_key_dims_x.str();
     bundle.m_KeyPszY = ss_key_dims_y.str();
     bundle.m_KeyNoData = ss_key_nodata.str();
 
     m_Bundles.push_back(bundle);
-
   }
 
   //
   // Prepare bundles from the number of points
   //
-  void PrepareInputs()
+  void
+  PrepareInputs()
   {
-    for (auto& bundle: m_Bundles)
+    for (auto & bundle : m_Bundles)
     {
       // Create a stack of input images
       FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
@@ -136,26 +137,31 @@ public:
     }
   }
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     // Documentation
     SetName("PatchesExtraction");
     SetDescription("This application extracts patches in multiple input images. Change "
-        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
-        "sources.");
-    SetDocLongDescription("The application takes an input vector layer which is a set of "
-        "points, typically the output of the \"SampleSelection\" or the \"LabelImageSampleSelection\" "
-        "application to sample patches in the input images (samples are centered on the points). "
-        "A \"source\" parameters group is composed of (i) an input image list (can be "
-        "one image e.g. high res. image, or multiple e.g. time series), (ii) the size "
-        "of the patches to sample, and (iii) the output images of patches which will "
-        "be generated at the end of the process. The example below show how to "
-        "set the samples sizes. For a SPOT6 image for instance, the patch size can "
-        "be 64x64 and for an input Sentinel-2 time series the patch size could be "
-        "1x1. Note that if a dimension size is not defined, the largest one will "
-        "be used (i.e. input image dimensions. The number of input sources can be changed "
-        "at runtime by setting the system environment variable " + tf::ENV_VAR_NAME_NSOURCES);
+                   "the " +
+                   tf::ENV_VAR_NAME_NSOURCES +
+                   " environment variable to set the number of "
+                   "sources.");
+    SetDocLongDescription(
+      "The application takes an input vector layer which is a set of "
+      "points, typically the output of the \"SampleSelection\" or the \"LabelImageSampleSelection\" "
+      "application to sample patches in the input images (samples are centered on the points). "
+      "A \"source\" parameters group is composed of (i) an input image list (can be "
+      "one image e.g. high res. image, or multiple e.g. time series), (ii) the size "
+      "of the patches to sample, and (iii) the output images of patches which will "
+      "be generated at the end of the process. The example below show how to "
+      "set the samples sizes. For a SPOT6 image for instance, the patch size can "
+      "be 64x64 and for an input Sentinel-2 time series the patch size could be "
+      "1x1. Note that if a dimension size is not defined, the largest one will "
+      "be used (i.e. input image dimensions. The number of input sources can be changed "
+      "at runtime by setting the system environment variable " +
+      tf::ENV_VAR_NAME_NSOURCES);
 
     SetDocAuthors("Remi Cresson");
 
@@ -163,36 +169,37 @@ public:
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
+    for (int i = 1; i < tf::GetNumberOfSources(); i++)
       AddAnInputImage();
 
     // Input vector data
-    AddParameter(ParameterType_InputVectorData, "vec", "Positions of the samples (must be in the same projection as input image)");
+    AddParameter(
+      ParameterType_InputVectorData, "vec", "Positions of the samples (must be in the same projection as input image)");
 
     // No data parameters
     AddParameter(ParameterType_Bool, "usenodata", "Reject samples that have no-data value");
-    MandatoryOff                    ("usenodata");
+    MandatoryOff("usenodata");
 
     // Output label
     AddParameter(ParameterType_OutputImage, "outlabels", "output labels");
-    SetDefaultOutputPixelType              ("outlabels", ImagePixelType_uint8);
-    MandatoryOff                           ("outlabels");
+    SetDefaultOutputPixelType("outlabels", ImagePixelType_uint8);
+    MandatoryOff("outlabels");
 
     // Class field
     AddParameter(ParameterType_String, "field", "field of class in the vector data");
 
     // Examples values
-    SetDocExampleParameterValue("vec",                "points.sqlite");
-    SetDocExampleParameterValue("source1.il",         "$s2_list");
+    SetDocExampleParameterValue("vec", "points.sqlite");
+    SetDocExampleParameterValue("source1.il", "$s2_list");
     SetDocExampleParameterValue("source1.patchsizex", "16");
     SetDocExampleParameterValue("source1.patchsizey", "16");
-    SetDocExampleParameterValue("field",              "class");
-    SetDocExampleParameterValue("source1.out",        "outpatches_16x16.tif");
-    SetDocExampleParameterValue("outlabels",          "outlabels.tif");
-
+    SetDocExampleParameterValue("field", "class");
+    SetDocExampleParameterValue("source1.out", "outpatches_16x16.tif");
+    SetDocExampleParameterValue("outlabels", "outlabels.tif");
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
 
     PrepareInputs();
@@ -201,12 +208,12 @@ public:
     SamplerType::Pointer sampler = SamplerType::New();
     sampler->SetInputVectorData(GetParameterVectorData("vec"));
     sampler->SetField(GetParameterAsString("field"));
-    if (GetParameterInt("usenodata")==1)
-      {
+    if (GetParameterInt("usenodata") == 1)
+    {
       otbAppLogINFO("Rejecting samples that have at least one no-data value");
       sampler->SetRejectPatchesWithNodata(true);
-      }
-    for (auto& bundle: m_Bundles)
+    }
+    for (auto & bundle : m_Bundles)
     {
       sampler->PushBackInputWithPatchSize(bundle.m_ImageSource.Get(), bundle.m_PatchSize, bundle.m_NoDataValue);
     }
@@ -220,7 +227,7 @@ public:
     otbAppLogINFO("Number of samples rejected : " << sampler->GetNumberOfRejectedSamples());
 
     // Save patches image
-    for (unsigned int i = 0 ; i < m_Bundles.size() ; i++)
+    for (unsigned int i = 0; i < m_Bundles.size(); i++)
     {
       SetParameterOutputImage(m_Bundles[i].m_KeyOut, sampler->GetOutputPatchImages()[i]);
     }
@@ -231,20 +238,19 @@ public:
     {
       SetParameterOutputImage("outlabels", sampler->GetOutputLabelImage());
     }
-
-  }
-  
-  
-  void DoUpdateParameters()
-  {
   }
 
+
+  void
+  DoUpdateParameters()
+  {}
+
 private:
   std::vector<SourceBundle> m_Bundles;
 
 }; // end of class
 
-} // end namespace wrapper
+} // namespace Wrapper
 } // end namespace otb
 
 OTB_APPLICATION_EXPORT(otb::Wrapper::PatchesExtraction)
diff --git a/app/otbPatchesSelection.cxx b/app/otbPatchesSelection.cxx
index 5d8165a0..186a5828 100644
--- a/app/otbPatchesSelection.cxx
+++ b/app/otbPatchesSelection.cxx
@@ -32,16 +32,17 @@
 #include "itkImageRegionConstIteratorWithOnlyIndex.h"
 
 // Functor to retrieve nodata
-template<class TPixel, class OutputPixel>
+template <class TPixel, class OutputPixel>
 class IsNoData
 {
 public:
-  IsNoData(){}
-  ~IsNoData(){}
+  IsNoData() {}
+  ~IsNoData() {}
 
-  inline OutputPixel operator()( const TPixel & A ) const
+  inline OutputPixel
+  operator()(const TPixel & A) const
   {
-    for (unsigned int band = 0 ; band < A.Size() ; band++)
+    for (unsigned int band = 0; band < A.Size(); band++)
     {
       if (A[band] != m_NoDataValue)
         return 1;
@@ -49,7 +50,8 @@ public:
     return 0;
   }
 
-  void SetNoDataValue(typename TPixel::ValueType value)
+  void
+  SetNoDataValue(typename TPixel::ValueType value)
   {
     m_NoDataValue = value;
   }
@@ -68,47 +70,48 @@ class PatchesSelection : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef PatchesSelection                    Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
+  typedef PatchesSelection              Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(PatchesSelection, Application);
 
   /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType                 DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
-  typedef VectorDataType::DataNodeType                 DataNodeType;
-  typedef DataNodeType::Pointer                        DataNodePointer;
-  typedef DataNodeType::PointType                      DataNodePointType;
+  typedef VectorDataType::DataTreeType            DataTreeType;
+  typedef itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
+  typedef VectorDataType::DataNodeType            DataNodeType;
+  typedef DataNodeType::Pointer                   DataNodePointer;
+  typedef DataNodeType::PointType                 DataNodePointType;
 
   /** typedefs */
-  typedef IsNoData<FloatVectorImageType::PixelType, UInt8ImageType::PixelType > IsNoDataFunctorType;
+  typedef IsNoData<FloatVectorImageType::PixelType, UInt8ImageType::PixelType>                    IsNoDataFunctorType;
   typedef itk::UnaryFunctorImageFilter<FloatVectorImageType, UInt8ImageType, IsNoDataFunctorType> IsNoDataFilterType;
 
-  typedef itk::FlatStructuringElement<2>                                         StructuringType;
-  typedef StructuringType::RadiusType                                            RadiusType;
+  typedef itk::FlatStructuringElement<2> StructuringType;
+  typedef StructuringType::RadiusType    RadiusType;
 
   typedef itk::BinaryErodeImageFilter<UInt8ImageType, UInt8ImageType, StructuringType> MorphoFilterType;
 
-  typedef otb::StreamingResampleImageFilter<UInt8ImageType,UInt8ImageType> PadFilterType;
-  typedef itk::NearestNeighborInterpolateImageFunction<UInt8ImageType> NNInterpolatorType;
+  typedef otb::StreamingResampleImageFilter<UInt8ImageType, UInt8ImageType> PadFilterType;
+  typedef itk::NearestNeighborInterpolateImageFunction<UInt8ImageType>      NNInterpolatorType;
 
   typedef tf::Distribution<UInt8ImageType> DistributionType;
 
   typedef itk::MaskImageFilter<UInt8ImageType, UInt8ImageType, UInt8ImageType> MaskImageFilterType;
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     // Documentation
     SetName("PatchesSelection");
     SetDescription("This application generate points sampled at regular interval over "
-        "the input image region. The grid size and spacing can be configured.");
+                   "the input image region. The grid size and spacing can be configured.");
     SetDocLongDescription("This application produces a vector data containing "
-        "a set of points centered on the patches lying in the valid regions of the input image. ");
+                          "a set of points centered on the patches lying in the valid regions of the input image. ");
 
     SetDocAuthors("Remi Cresson");
 
@@ -119,123 +122,142 @@ public:
 
     // Input no-data value
     AddParameter(ParameterType_Float, "nodata", "nodata value");
-    MandatoryOn                      ("nodata");
-    SetDefaultParameterFloat         ("nodata", 0);
-    AddParameter(ParameterType_Bool,  "nocheck", "If on, no check on the validity of patches is performed");
-    MandatoryOff                     ("nocheck");
+    MandatoryOn("nodata");
+    SetDefaultParameterFloat("nodata", 0);
+    AddParameter(ParameterType_Bool, "nocheck", "If on, no check on the validity of patches is performed");
+    MandatoryOff("nocheck");
 
     // Grid
     AddParameter(ParameterType_Group, "grid", "grid settings");
     AddParameter(ParameterType_Int, "grid.step", "step between patches");
-    SetMinimumParameterIntValue    ("grid.step", 1);
+    SetMinimumParameterIntValue("grid.step", 1);
     AddParameter(ParameterType_Int, "grid.psize", "patches size");
-    SetMinimumParameterIntValue    ("grid.psize", 1);
+    SetMinimumParameterIntValue("grid.psize", 1);
     AddParameter(ParameterType_Int, "grid.offsetx", "offset of the grid (x axis)");
-    SetDefaultParameterInt         ("grid.offsetx", 0);
+    SetDefaultParameterInt("grid.offsetx", 0);
     AddParameter(ParameterType_Int, "grid.offsety", "offset of the grid (y axis)");
-    SetDefaultParameterInt         ("grid.offsety", 0);
+    SetDefaultParameterInt("grid.offsety", 0);
 
     // Strategy
     AddParameter(ParameterType_Choice, "strategy", "Selection strategy for validation/training patches");
     AddChoice("strategy.chessboard", "fifty fifty, like a chess board");
     AddChoice("strategy.balanced", "you can chose the degree of spatial randomness vs class balance");
-    AddParameter(ParameterType_Float, "strategy.balanced.sp", "Spatial proportion: between 0 and 1, "
-        "indicating the amount of randomly sampled data in space");
-    SetMinimumParameterFloatValue    ("strategy.balanced.sp", 0);
-    SetMaximumParameterFloatValue    ("strategy.balanced.sp", 1);
-    SetDefaultParameterFloat         ("strategy.balanced.sp", 0.25);
-    AddParameter(ParameterType_Int,   "strategy.balanced.nclasses", "Number of classes");
-    SetMinimumParameterIntValue      ("strategy.balanced.nclasses", 2);
-    MandatoryOn                      ("strategy.balanced.nclasses");
+    AddParameter(ParameterType_Float,
+                 "strategy.balanced.sp",
+                 "Spatial proportion: between 0 and 1, "
+                 "indicating the amount of randomly sampled data in space");
+    SetMinimumParameterFloatValue("strategy.balanced.sp", 0);
+    SetMaximumParameterFloatValue("strategy.balanced.sp", 1);
+    SetDefaultParameterFloat("strategy.balanced.sp", 0.25);
+    AddParameter(ParameterType_Int, "strategy.balanced.nclasses", "Number of classes");
+    SetMinimumParameterIntValue("strategy.balanced.nclasses", 2);
+    MandatoryOn("strategy.balanced.nclasses");
     AddParameter(ParameterType_InputImage, "strategy.balanced.labelimage", "input label image");
-    MandatoryOn                           ("strategy.balanced.labelimage");
+    MandatoryOn("strategy.balanced.labelimage");
 
     // Output points
     AddParameter(ParameterType_OutputVectorData, "outtrain", "output set of points (training)");
     AddParameter(ParameterType_OutputVectorData, "outvalid", "output set of points (validation)");
 
     AddRAMParameter();
-
   }
 
   class SampleBundle
   {
   public:
-    SampleBundle(){}
-    explicit SampleBundle(unsigned int nClasses): dist(DistributionType(nClasses)), id(0), black(true){
-      (void) point;
-      (void) index;
+    SampleBundle() {}
+    explicit SampleBundle(unsigned int nClasses)
+      : dist(DistributionType(nClasses))
+      , id(0)
+      , black(true)
+    {
+      (void)point;
+      (void)index;
     }
-    ~SampleBundle(){}
-
-    SampleBundle(const SampleBundle & other): dist(other.GetDistribution()), id(other.GetSampleID()),
-      point(other.GetPosition()), black(other.GetBlack()), index(other.GetIndex())
+    ~SampleBundle() {}
+
+    SampleBundle(const SampleBundle & other)
+      : dist(other.GetDistribution())
+      , id(other.GetSampleID())
+      , point(other.GetPosition())
+      , black(other.GetBlack())
+      , index(other.GetIndex())
     {}
 
-    DistributionType GetDistribution() const
+    DistributionType
+    GetDistribution() const
     {
       return dist;
     }
 
-    DistributionType& GetModifiableDistribution()
+    DistributionType &
+    GetModifiableDistribution()
     {
       return dist;
     }
 
-    unsigned int GetSampleID() const
+    unsigned int
+    GetSampleID() const
     {
       return id;
     }
 
-    unsigned int& GetModifiableSampleID()
+    unsigned int &
+    GetModifiableSampleID()
     {
       return id;
     }
 
-    DataNodePointType GetPosition() const
+    DataNodePointType
+    GetPosition() const
     {
       return point;
     }
 
-    DataNodePointType& GetModifiablePosition()
+    DataNodePointType &
+    GetModifiablePosition()
     {
       return point;
     }
 
-    bool& GetModifiableBlack()
+    bool &
+    GetModifiableBlack()
     {
       return black;
     }
 
-    bool GetBlack() const
+    bool
+    GetBlack() const
     {
       return black;
     }
 
-    UInt8ImageType::IndexType& GetModifiableIndex()
+    UInt8ImageType::IndexType &
+    GetModifiableIndex()
     {
       return index;
     }
 
-    UInt8ImageType::IndexType GetIndex() const
+    UInt8ImageType::IndexType
+    GetIndex() const
     {
       return index;
     }
 
   private:
-
-    DistributionType dist;
-    unsigned int id;
-    DataNodePointType point;
-    bool black;
+    DistributionType          dist;
+    unsigned int              id;
+    DataNodePointType         point;
+    bool                      black;
     UInt8ImageType::IndexType index;
   };
 
   /*
    * Apply the given function at each sampling location, checking if the patch is valid or not
    */
-  template<typename TLambda>
-  void Apply(TLambda lambda)
+  template <typename TLambda>
+  void
+  Apply(TLambda lambda)
   {
 
     int userOffX = GetParameterInt("grid.offsetx");
@@ -247,49 +269,49 @@ public:
 
     // Explicit streaming over the morphed mask, based on the RAM parameter
     typedef otb::RAMDrivenStrippedStreamingManager<UInt8ImageType> StreamingManagerType;
-    StreamingManagerType::Pointer m_StreamingManager = StreamingManagerType::New();
+    StreamingManagerType::Pointer                                  m_StreamingManager = StreamingManagerType::New();
     m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
 
     UInt8ImageType::Pointer inputImage;
-    bool readInput = true;
-    if (GetParameterInt("nocheck")==1)
-      {
+    bool                    readInput = true;
+    if (GetParameterInt("nocheck") == 1)
+    {
       otbAppLogINFO("\"nocheck\" mode is enabled. Input image pixels no-data values will not be checked.");
       if (HasValue("mask"))
-        {
+      {
         otbAppLogINFO("Using the provided \"mask\" parameter.");
         inputImage = GetParameterUInt8Image("mask");
-        }
+      }
       else
-        {
+      {
         // This is just a hack to not trigger the whole morpho/pad pipeline
         inputImage = m_NoDataFilter->GetOutput();
         readInput = false;
-        }
       }
+    }
     else
-      {
+    {
       inputImage = m_MorphoFilter->GetOutput();
 
       // Offset update because the morpho filter pads the input image with 1 pixel border
       userOffX += 1;
       userOffY += 1;
-      }
+    }
     UInt8ImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
     entireRegion.ShrinkByRadius(m_Radius);
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
+    m_StreamingManager->PrepareStreaming(inputImage, entireRegion);
     UInt8ImageType::IndexType start;
     start[0] = m_Radius[0] + 1;
     start[1] = m_Radius[1] + 1;
 
-    int m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
-    UInt8ImageType::IndexType pos;
+    int                            m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
+    UInt8ImageType::IndexType      pos;
     UInt8ImageType::IndexValueType step = GetParameterInt("grid.step");
     pos.Fill(0);
 
     // Offset update
-    userOffX %= step ;
-    userOffY %= step ;
+    userOffX %= step;
+    userOffY %= step;
 
     for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
     {
@@ -297,7 +319,7 @@ public:
 
       UInt8ImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<UInt8ImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<UInt8ImageType> inIt (inputImage, streamRegion);
+      itk::ImageRegionConstIterator<UInt8ImageType> inIt(inputImage, streamRegion);
 
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
@@ -333,7 +355,6 @@ public:
           }
         }
       }
-
     }
   }
 
@@ -345,21 +366,24 @@ public:
   {
     // Nb of samples (maximum)
     const UInt8ImageType::RegionType entireRegion = m_NoDataFilter->GetOutput()->GetLargestPossibleRegion();
-    const unsigned int maxNbOfCols = std::ceil(entireRegion.GetSize(0)/GetParameterInt("grid.step")) + 1;
-    const unsigned int maxNbOfRows = std::ceil(entireRegion.GetSize(1)/GetParameterInt("grid.step")) + 1;
-    unsigned int maxNbOfSamples = 1;
+    const unsigned int maxNbOfCols = std::ceil(entireRegion.GetSize(0) / GetParameterInt("grid.step")) + 1;
+    const unsigned int maxNbOfRows = std::ceil(entireRegion.GetSize(1) / GetParameterInt("grid.step")) + 1;
+    unsigned int       maxNbOfSamples = 1;
     maxNbOfSamples *= maxNbOfCols;
     maxNbOfSamples *= maxNbOfRows;
 
     // Nb of classes
-    SampleBundle initSB(nbOfClasses);
+    SampleBundle              initSB(nbOfClasses);
     std::vector<SampleBundle> bundles(maxNbOfSamples, initSB);
 
     return bundles;
   }
 
-  void SetBlackOrWhiteBundle(SampleBundle & bundle, unsigned int & count,
-      const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo)
+  void
+  SetBlackOrWhiteBundle(SampleBundle &                    bundle,
+                        unsigned int &                    count,
+                        const UInt8ImageType::IndexType & pos,
+                        const UInt8ImageType::PointType & geo)
   {
     // Black or white
     bool black = ((pos[0] + pos[1]) % 2 == 0);
@@ -369,20 +393,20 @@ public:
     bundle.GetModifiableBlack() = black;
     bundle.GetModifiableIndex() = pos;
     count++;
-
   }
 
   /*
    * Samples are placed at regular intervals
    */
-  void SampleChessboard()
+  void
+  SampleChessboard()
   {
 
     std::vector<SampleBundle> bundles = AllocateSamples();
 
     unsigned int count = 0;
-    auto lambda = [this, &count, &bundles]
-                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
+    auto         lambda = [this, &count, &bundles](const UInt8ImageType::IndexType & pos,
+                                           const UInt8ImageType::PointType & geo) {
       SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
     };
 
@@ -393,7 +417,8 @@ public:
     PopulateVectorData(bundles);
   }
 
-  void SampleBalanced()
+  void
+  SampleBalanced()
   {
 
     // 1. Compute distribution of all samples
@@ -406,12 +431,13 @@ public:
     UInt8ImageType::SizeType patchSize;
     patchSize.Fill(GetParameterInt("grid.psize"));
     unsigned int count = 0;
-    auto lambda = [this, &bundles, &patchSize, &count]
-                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
-
+    auto         lambda = [this, &bundles, &patchSize, &count](const UInt8ImageType::IndexType & pos,
+                                                       const UInt8ImageType::PointType & geo) {
       // Update this sample distribution
       if (tf::UpdateDistributionFromPatch<UInt8ImageType>(GetParameterUInt8Image("strategy.balanced.labelimage"),
-          geo, patchSize, bundles[count].GetModifiableDistribution()))
+                                                          geo,
+                                                          patchSize,
+                                                          bundles[count].GetModifiableDistribution()))
       {
         SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
       }
@@ -420,7 +446,7 @@ public:
     Apply(lambda);
     bundles.resize(count);
 
-    otbAppLogINFO("Total number of candidates: " << count );
+    otbAppLogINFO("Total number of candidates: " << count);
 
     // 2. Seed = spatially random samples
 
@@ -430,13 +456,13 @@ public:
 
     otbAppLogINFO("Spatial sampling step " << samplingStep);
 
-    float step = 0;
+    float                     step = 0;
     std::vector<SampleBundle> seed(count);
     std::vector<SampleBundle> candidates(count);
 
     unsigned int seedCount = 0;
     unsigned int candidatesCount = 0;
-    for (auto& d: bundles)
+    for (auto & d : bundles)
     {
       if (d.GetIndex()[0] % samplingStep + d.GetIndex()[1] % samplingStep == 0)
       {
@@ -462,18 +488,19 @@ public:
 
     float removalRate = static_cast<float>(seedCount) / static_cast<float>(nbToRemove);
     float removalStep = 0;
-    auto removeSamples = [&removalStep, &removalRate](SampleBundle & b) -> bool {
-      (void) b;
+    auto  removeSamples = [&removalStep, &removalRate](SampleBundle & b) -> bool {
+      (void)b;
       bool ret = false;
       if (removalStep >= removalRate)
-        {
+      {
         removalStep = fmod(removalStep, removalRate);
         ret = true;
-        }
+      }
       else
         ret = false;
       removalStep++;
-      return ret;;
+      return ret;
+      ;
     };
     auto iterator = std::remove_if(seed.begin(), seed.end(), removeSamples);
     seed.erase(iterator, seed.end());
@@ -483,8 +510,8 @@ public:
     // 3. Compute seed distribution
 
     const unsigned int nbOfClasses = GetParameterInt("strategy.balanced.nclasses");
-    DistributionType seedDist(nbOfClasses);
-    for (auto& d: seed)
+    DistributionType   seedDist(nbOfClasses);
+    for (auto & d : seed)
       seedDist.Update(d.GetDistribution());
 
     otbAppLogINFO("Spatial seed distribution: " << seedDist.ToString());
@@ -494,16 +521,16 @@ public:
     otbAppLogINFO("Balance seed candidates size: " << candidates.size());
 
     // Sort by cos
-    auto comparator = [&seedDist](const SampleBundle & a, const SampleBundle & b) -> bool{
+    auto comparator = [&seedDist](const SampleBundle & a, const SampleBundle & b) -> bool {
       return a.GetDistribution().Cosinus(seedDist) > b.GetDistribution().Cosinus(seedDist);
     };
     sort(candidates.begin(), candidates.end(), comparator);
 
     DistributionType idealDist(nbOfClasses, 1.0 / std::sqrt(static_cast<float>(nbOfClasses)));
-    float minCos = 0;
-    unsigned int samplesAdded = 0;
-    seed.resize(seed.size()+candidates.size(), SampleBundle(nbOfClasses));
-    while(candidates.size() > 0)
+    float            minCos = 0;
+    unsigned int     samplesAdded = 0;
+    seed.resize(seed.size() + candidates.size(), SampleBundle(nbOfClasses));
+    while (candidates.size() > 0)
     {
       // Get the less correlated sample
       SampleBundle candidate = candidates.back();
@@ -535,22 +562,23 @@ public:
     PopulateVectorData(seed);
   }
 
-  void PopulateVectorData(const std::vector<SampleBundle> & samples)
+  void
+  PopulateVectorData(const std::vector<SampleBundle> & samples)
   {
     // Get data tree
     DataTreeType::Pointer treeTrain = m_OutVectorDataTrain->GetDataTree();
     DataTreeType::Pointer treeValid = m_OutVectorDataValid->GetDataTree();
-    DataNodePointer rootTrain = treeTrain->GetRoot()->Get();
-    DataNodePointer rootValid = treeValid->GetRoot()->Get();
-    DataNodePointer documentTrain = DataNodeType::New();
-    DataNodePointer documentValid = DataNodeType::New();
+    DataNodePointer       rootTrain = treeTrain->GetRoot()->Get();
+    DataNodePointer       rootValid = treeValid->GetRoot()->Get();
+    DataNodePointer       documentTrain = DataNodeType::New();
+    DataNodePointer       documentValid = DataNodeType::New();
     documentTrain->SetNodeType(DOCUMENT);
     documentValid->SetNodeType(DOCUMENT);
     treeTrain->Add(documentTrain, rootTrain);
     treeValid->Add(documentValid, rootValid);
 
     unsigned int id = 0;
-    for (const auto& sample: samples)
+    for (const auto & sample : samples)
     {
       // Add point to the VectorData tree
       DataNodePointer newDataNode = DataNodeType::New();
@@ -569,11 +597,11 @@ public:
         // Valid
         treeValid->Add(newDataNode, documentValid);
       }
-
     }
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
     otbAppLogINFO("Grid step : " << this->GetParameterInt("grid.step"));
     otbAppLogINFO("Patch size : " << this->GetParameterInt("grid.psize"));
@@ -587,7 +615,7 @@ public:
 
     // If mask available, use it
     if (HasValue("mask"))
-      {
+    {
       if (GetParameterUInt8Image("mask")->GetLargestPossibleRegion().GetSize() !=
           GetParameterFloatVectorImage("in")->GetLargestPossibleRegion().GetSize())
         otbAppLogFATAL("Mask must have the same size as the input image!");
@@ -596,24 +624,24 @@ public:
       m_MaskImageFilter->SetMaskImage(GetParameterUInt8Image("mask"));
       m_MaskImageFilter->UpdateOutputInformation();
       src = m_MaskImageFilter->GetOutput();
-      }
+    }
 
     // Padding 1 pixel
     UInt8ImageType::SizeType size = src->GetLargestPossibleRegion().GetSize();
     size[0] += 2;
     size[1] += 2;
     UInt8ImageType::SpacingType spacing = src->GetSignedSpacing();
-    UInt8ImageType::PointType origin = src->GetOrigin();
+    UInt8ImageType::PointType   origin = src->GetOrigin();
     origin[0] -= spacing[0];
     origin[1] -= spacing[1];
     m_PadFilter = PadFilterType::New();
     NNInterpolatorType::Pointer nnInterpolator = NNInterpolatorType::New();
     m_PadFilter->SetInterpolator(nnInterpolator);
-    m_PadFilter->SetInput( src );
+    m_PadFilter->SetInput(src);
     m_PadFilter->SetOutputOrigin(origin);
     m_PadFilter->SetOutputSpacing(spacing);
     m_PadFilter->SetOutputSize(size);
-    m_PadFilter->SetEdgePaddingValue( 0 );
+    m_PadFilter->SetEdgePaddingValue(0);
     m_PadFilter->UpdateOutputInformation();
 
     // Morpho
@@ -646,17 +674,16 @@ public:
       SampleBalanced();
     }
 
-    otbAppLogINFO( "Writing output samples positions");
+    otbAppLogINFO("Writing output samples positions");
 
     SetParameterOutputVectorData("outtrain", m_OutVectorDataTrain);
     SetParameterOutputVectorData("outvalid", m_OutVectorDataValid);
-
   }
 
 
-  void DoUpdateParameters()
-  {
-  }
+  void
+  DoUpdateParameters()
+  {}
 
 private:
   RadiusType                   m_Radius;
@@ -668,7 +695,7 @@ private:
   MaskImageFilterType::Pointer m_MaskImageFilter;
 }; // end of class
 
-} // end namespace wrapper
+} // namespace Wrapper
 } // end namespace otb
 
-OTB_APPLICATION_EXPORT( otb::Wrapper::PatchesSelection )
+OTB_APPLICATION_EXPORT(otb::Wrapper::PatchesSelection)
diff --git a/app/otbTensorflowModelServe.cxx b/app/otbTensorflowModelServe.cxx
index 47a8c957..b9f74dfc 100644
--- a/app/otbTensorflowModelServe.cxx
+++ b/app/otbTensorflowModelServe.cxx
@@ -42,10 +42,10 @@ class TensorflowModelServe : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef TensorflowModelServe                       Self;
-  typedef Application                                Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
+  typedef TensorflowModelServe          Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -53,10 +53,10 @@ public:
 
   /** Typedefs for tensorflow */
   typedef otb::TensorflowMultisourceModelFilter<FloatVectorImageType, FloatVectorImageType> TFModelFilterType;
-  typedef otb::TensorflowSource<FloatVectorImageType> InputImageSource;
+  typedef otb::TensorflowSource<FloatVectorImageType>                                       InputImageSource;
 
   /** Typedef for streaming */
-  typedef otb::ImageRegionSquareTileSplitter<FloatVectorImageType::ImageDimension> TileSplitterType;
+  typedef otb::ImageRegionSquareTileSplitter<FloatVectorImageType::ImageDimension>  TileSplitterType;
   typedef otb::TensorflowStreamerFilter<FloatVectorImageType, FloatVectorImageType> StreamingFilterType;
 
   /** Typedefs for images */
@@ -83,152 +83,164 @@ public:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void AddAnInputImage()
+  void
+  AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-    std::stringstream ss_key_group, ss_desc_group,
-    ss_key_in, ss_desc_in,
-    ss_key_dims_x, ss_desc_dims_x,
-    ss_key_dims_y, ss_desc_dims_y,
-    ss_key_ph, ss_desc_ph;
+    std::stringstream ss_key_group, ss_desc_group, ss_key_in, ss_desc_in, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y,
+      ss_desc_dims_y, ss_key_ph, ss_desc_ph;
 
     // Parameter group key/description
-    ss_key_group  << "source"                  << inputNumber;
+    ss_key_group << "source" << inputNumber;
     ss_desc_group << "Parameters for source #" << inputNumber;
 
     // Parameter group keys
-    ss_key_in      << ss_key_group.str() << ".il";
-    ss_key_dims_x  << ss_key_group.str() << ".rfieldx";
-    ss_key_dims_y  << ss_key_group.str() << ".rfieldy";
-    ss_key_ph      << ss_key_group.str() << ".placeholder";
+    ss_key_in << ss_key_group.str() << ".il";
+    ss_key_dims_x << ss_key_group.str() << ".rfieldx";
+    ss_key_dims_y << ss_key_group.str() << ".rfieldy";
+    ss_key_ph << ss_key_group.str() << ".placeholder";
 
     // Parameter group descriptions
-    ss_desc_in     << "Input image (or list to stack) for source #" << inputNumber;
-    ss_desc_dims_x << "Input receptive field (width) for source #"  << inputNumber;
+    ss_desc_in << "Input image (or list to stack) for source #" << inputNumber;
+    ss_desc_dims_x << "Input receptive field (width) for source #" << inputNumber;
     ss_desc_dims_y << "Input receptive field (height) for source #" << inputNumber;
-    ss_desc_ph     << "Name of the input placeholder for source #"  << inputNumber;
+    ss_desc_ph << "Name of the input placeholder for source #" << inputNumber;
 
     // Populate group
-    AddParameter(ParameterType_Group,          ss_key_group.str(),  ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(),     ss_desc_in.str() );
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(), 1);
-    SetDefaultParameterInt                    (ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
-    SetDefaultParameterInt                    (ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_String,         ss_key_ph.str(),     ss_desc_ph.str());
-    MandatoryOff                              (ss_key_ph.str());
+    AddParameter(ParameterType_Group, ss_key_group.str(), ss_desc_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_in.str(), ss_desc_in.str());
+    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
+    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
+    SetDefaultParameterInt(ss_key_dims_x.str(), 1);
+    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
+    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
+    SetDefaultParameterInt(ss_key_dims_y.str(), 1);
+    AddParameter(ParameterType_String, ss_key_ph.str(), ss_desc_ph.str());
+    MandatoryOff(ss_key_ph.str());
 
     // Add a new bundle
     ProcessObjectsBundle bundle;
-    bundle.m_KeyIn     = ss_key_in.str();
-    bundle.m_KeyPszX   = ss_key_dims_x.str();
-    bundle.m_KeyPszY   = ss_key_dims_y.str();
+    bundle.m_KeyIn = ss_key_in.str();
+    bundle.m_KeyPszX = ss_key_dims_x.str();
+    bundle.m_KeyPszY = ss_key_dims_y.str();
     bundle.m_KeyPHName = ss_key_ph.str();
 
     m_Bundles.push_back(bundle);
-
   }
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     // Documentation
     SetName("TensorflowModelServe");
-    SetDescription("Multisource deep learning classifier using TensorFlow. Change the "
-        + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of sources.");
+    SetDescription("Multisource deep learning classifier using TensorFlow. Change the " + tf::ENV_VAR_NAME_NSOURCES +
+                   " environment variable to set the number of sources.");
     SetDocLongDescription("The application run a TensorFlow model over multiple data sources. "
-        "The number of input sources can be changed at runtime by setting the system "
-        "environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". For each source, you have to "
-        "set (1) the placeholder name, as named in the TensorFlow model, (2) the receptive "
-        "field and (3) the image(s) source. The output is a multiband image, stacking all "
-        "outputs tensors together: you have to specify (1) the names of the output tensors, as "
-        "named in the TensorFlow model (typically, an operator's output) and (2) the expression "
-        "field of each output tensor. The output tensors values will be stacked in the same "
-        "order as they appear in the \"model.output\" parameter (you can use a space separator "
-        "between names). You might consider to use extended filename to bypass the automatic "
-        "memory footprint calculator of the otb application engine, and set a good splitting "
-        "strategy (Square tiles is good for convolutional networks) or use the \"optim\" "
-        "parameter group to impose your squared tiles sizes");
+                          "The number of input sources can be changed at runtime by setting the system "
+                          "environment variable " +
+                          tf::ENV_VAR_NAME_NSOURCES +
+                          ". For each source, you have to "
+                          "set (1) the placeholder name, as named in the TensorFlow model, (2) the receptive "
+                          "field and (3) the image(s) source. The output is a multiband image, stacking all "
+                          "outputs tensors together: you have to specify (1) the names of the output tensors, as "
+                          "named in the TensorFlow model (typically, an operator's output) and (2) the expression "
+                          "field of each output tensor. The output tensors values will be stacked in the same "
+                          "order as they appear in the \"model.output\" parameter (you can use a space separator "
+                          "between names). You might consider to use extended filename to bypass the automatic "
+                          "memory footprint calculator of the otb application engine, and set a good splitting "
+                          "strategy (Square tiles is good for convolutional networks) or use the \"optim\" "
+                          "parameter group to impose your squared tiles sizes");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
+    for (int i = 1; i < tf::GetNumberOfSources(); i++)
       AddAnInputImage();
 
     // Input model
-    AddParameter(ParameterType_Group,         "model",           "model parameters");
-    AddParameter(ParameterType_Directory,     "model.dir",       "TensorFlow SavedModel directory");
-    MandatoryOn                              ("model.dir");
-    SetParameterDescription                  ("model.dir", "The model directory should contains the model Google Protobuf (.pb) and variables");
-
-    AddParameter(ParameterType_StringList,    "model.userplaceholders",    "Additional single-valued placeholders. Supported types: int, float, bool.");
-    MandatoryOff                             ("model.userplaceholders");
-    SetParameterDescription                  ("model.userplaceholders", "Syntax to use is \"placeholder_1=value_1 ... placeholder_N=value_N\"");
-    AddParameter(ParameterType_Bool,          "model.fullyconv", "Fully convolutional");
-    MandatoryOff                             ("model.fullyconv");
-    AddParameter(ParameterType_StringList,    "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff                             ("model.tagsets");
+    AddParameter(ParameterType_Group, "model", "model parameters");
+    AddParameter(ParameterType_Directory, "model.dir", "TensorFlow SavedModel directory");
+    MandatoryOn("model.dir");
+    SetParameterDescription("model.dir",
+                            "The model directory should contain the model Google Protobuf (.pb) and variables");
+
+    AddParameter(ParameterType_StringList,
+                 "model.userplaceholders",
+                 "Additional single-valued placeholders. Supported types: int, float, bool.");
+    MandatoryOff("model.userplaceholders");
+    SetParameterDescription("model.userplaceholders",
+                            "Syntax to use is \"placeholder_1=value_1 ... placeholder_N=value_N\"");
+    AddParameter(ParameterType_Bool, "model.fullyconv", "Fully convolutional");
+    MandatoryOff("model.fullyconv");
+    AddParameter(ParameterType_StringList,
+                 "model.tagsets",
+                 "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is "
+                 "supported. Can be retrieved by running `saved_model_cli show --dir your_model_dir --all`");
+    MandatoryOff("model.tagsets");
 
     // Output tensors parameters
-    AddParameter(ParameterType_Group,         "output",          "Output tensors parameters");
-    AddParameter(ParameterType_Float,         "output.spcscale", "The output spacing scale, related to the first input");
-    SetDefaultParameterFloat                 ("output.spcscale", 1.0);
-    SetParameterDescription                  ("output.spcscale", "The output image size/scale and spacing*scale where size and spacing corresponds to the first input");
-    AddParameter(ParameterType_StringList,    "output.names",    "Names of the output tensors");
-    MandatoryOff                            ("output.names");
+    AddParameter(ParameterType_Group, "output", "Output tensors parameters");
+    AddParameter(ParameterType_Float, "output.spcscale", "The output spacing scale, related to the first input");
+    SetDefaultParameterFloat("output.spcscale", 1.0);
+    SetParameterDescription(
+      "output.spcscale",
+      "The output image size/scale and spacing*scale where size and spacing correspond to the first input");
+    AddParameter(ParameterType_StringList, "output.names", "Names of the output tensors");
+    MandatoryOff("output.names");
 
     // Output Field of Expression
-    AddParameter(ParameterType_Int,           "output.efieldx", "The output expression field (width)");
-    SetMinimumParameterIntValue              ("output.efieldx", 1);
-    SetDefaultParameterInt                   ("output.efieldx", 1);
-    MandatoryOn                              ("output.efieldx");
-    AddParameter(ParameterType_Int,           "output.efieldy", "The output expression field (height)");
-    SetMinimumParameterIntValue              ("output.efieldy", 1);
-    SetDefaultParameterInt                   ("output.efieldy", 1);
-    MandatoryOn                              ("output.efieldy");
+    AddParameter(ParameterType_Int, "output.efieldx", "The output expression field (width)");
+    SetMinimumParameterIntValue("output.efieldx", 1);
+    SetDefaultParameterInt("output.efieldx", 1);
+    MandatoryOn("output.efieldx");
+    AddParameter(ParameterType_Int, "output.efieldy", "The output expression field (height)");
+    SetMinimumParameterIntValue("output.efieldy", 1);
+    SetDefaultParameterInt("output.efieldy", 1);
+    MandatoryOn("output.efieldy");
 
     // Fine tuning
-    AddParameter(ParameterType_Group,         "optim" , "This group of parameters allows optimization of processing time");
-    AddParameter(ParameterType_Bool,          "optim.disabletiling", "Disable tiling");
-    MandatoryOff                             ("optim.disabletiling");
-    SetParameterDescription                  ("optim.disabletiling", "Tiling avoids to process a too large subset of image, but sometimes it can be useful to disable it");
-    AddParameter(ParameterType_Int,           "optim.tilesizex", "Tile width used to stream the filter output");
-    SetMinimumParameterIntValue              ("optim.tilesizex", 1);
-    SetDefaultParameterInt                   ("optim.tilesizex", 16);
-    AddParameter(ParameterType_Int,           "optim.tilesizey", "Tile height used to stream the filter output");
-    SetMinimumParameterIntValue              ("optim.tilesizey", 1);
-    SetDefaultParameterInt                   ("optim.tilesizey", 16);
+    AddParameter(ParameterType_Group, "optim", "This group of parameters allows optimization of processing time");
+    AddParameter(ParameterType_Bool, "optim.disabletiling", "Disable tiling");
+    MandatoryOff("optim.disabletiling");
+    SetParameterDescription(
+      "optim.disabletiling",
+      "Tiling avoids to process a too large subset of image, but sometimes it can be useful to disable it");
+    AddParameter(ParameterType_Int, "optim.tilesizex", "Tile width used to stream the filter output");
+    SetMinimumParameterIntValue("optim.tilesizex", 1);
+    SetDefaultParameterInt("optim.tilesizex", 16);
+    AddParameter(ParameterType_Int, "optim.tilesizey", "Tile height used to stream the filter output");
+    SetMinimumParameterIntValue("optim.tilesizey", 1);
+    SetDefaultParameterInt("optim.tilesizey", 16);
 
     // Output image
     AddParameter(ParameterType_OutputImage, "out", "output image");
 
     // Example
-    SetDocExampleParameterValue("source1.il",             "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder",    "x1");
-    SetDocExampleParameterValue("source1.rfieldx",        "16");
-    SetDocExampleParameterValue("source1.rfieldy",        "16");
-    SetDocExampleParameterValue("model.dir",              "/tmp/my_saved_model/");
+    SetDocExampleParameterValue("source1.il", "spot6pms.tif");
+    SetDocExampleParameterValue("source1.placeholder", "x1");
+    SetDocExampleParameterValue("source1.rfieldx", "16");
+    SetDocExampleParameterValue("source1.rfieldy", "16");
+    SetDocExampleParameterValue("model.dir", "/tmp/my_saved_model/");
     SetDocExampleParameterValue("model.userplaceholders", "is_training=false dropout=0.0");
-    SetDocExampleParameterValue("output.names",           "out_predict1 out_proba1");
-    SetDocExampleParameterValue("out",                    "\"classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256\"");
-
+    SetDocExampleParameterValue("output.names", "out_predict1 out_proba1");
+    SetDocExampleParameterValue(
+      "out", "\"classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256\"");
   }
 
   //
   // Prepare bundles from the number of points
   //
-  void PrepareInputs()
+  void
+  PrepareInputs()
   {
 
-    for (auto& bundle: m_Bundles)
+    for (auto & bundle : m_Bundles)
     {
       // Setting the image source
       FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
@@ -238,12 +250,13 @@ public:
       bundle.m_PatchSize[1] = GetParameterInt(bundle.m_KeyPszY);
 
       otbAppLogINFO("Source info :");
-      otbAppLogINFO("Receptive field  : " << bundle.m_PatchSize  );
+      otbAppLogINFO("Receptive field  : " << bundle.m_PatchSize);
       otbAppLogINFO("Placeholder name : " << bundle.m_Placeholder);
     }
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
 
     // Load the Tensorflow bundle
@@ -261,8 +274,8 @@ public:
 
     // Get user placeholders
     TFModelFilterType::StringList expressions = GetParameterStringList("model.userplaceholders");
-    TFModelFilterType::DictType dict;
-    for (auto& exp: expressions)
+    TFModelFilterType::DictType   dict;
+    for (auto & exp : expressions)
     {
       TFModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
       dict.push_back(entry);
@@ -272,13 +285,13 @@ public:
     m_TFFilter->SetUserPlaceholders(dict);
 
     // Input sources
-    for (auto& bundle: m_Bundles)
+    for (auto & bundle : m_Bundles)
     {
       m_TFFilter->PushBackInputTensorBundle(bundle.m_Placeholder, bundle.m_PatchSize, bundle.m_ImageSource.Get());
     }
 
     // Fully convolutional mode on/off
-    if (GetParameterInt("model.fullyconv")==1)
+    if (GetParameterInt("model.fullyconv") == 1)
     {
       otbAppLogINFO("The TensorFlow model is used in fully convolutional mode");
       m_TFFilter->SetFullyConvolutional(true);
@@ -288,7 +301,7 @@ public:
     FloatVectorImageType::SizeType foe;
     foe[0] = GetParameterInt("output.efieldx");
     foe[1] = GetParameterInt("output.efieldy");
-    m_TFFilter->SetOutputExpressionFields({foe});
+    m_TFFilter->SetOutputExpressionFields({ foe });
 
     otbAppLogINFO("Output field of expression: " << m_TFFilter->GetOutputExpressionFields()[0]);
 
@@ -301,22 +314,22 @@ public:
       tileSize[1] = GetParameterInt("optim.tilesizey");
 
       // Check that the tile size is aligned to the field of expression
-      for (unsigned int i = 0 ; i < FloatVectorImageType::ImageDimension ; i++)
+      for (unsigned int i = 0; i < FloatVectorImageType::ImageDimension; i++)
         if (tileSize[i] % foe[i] != 0)
-          {
+        {
           SizeType::SizeValueType newSize = 1 + std::floor(tileSize[i] / foe[i]);
           newSize *= foe[i];
 
           otbAppLogWARNING("Aligning the tiling to the output expression field "
-              << "for better performances (dim " << i << "). New value set to " << newSize)
+                           << "for better performances (dim " << i << "). New value set to " << newSize)
 
-          tileSize[i] = newSize;
-          }
+            tileSize[i] = newSize;
+        }
 
       otbAppLogINFO("Force tiling with squared tiles of " << tileSize)
 
-      // Force the computation tile by tile
-      m_StreamFilter = StreamingFilterType::New();
+        // Force the computation tile by tile
+        m_StreamFilter = StreamingFilterType::New();
       m_StreamFilter->SetOutputGridSize(tileSize);
       m_StreamFilter->SetInput(m_TFFilter->GetOutput());
 
@@ -328,14 +341,13 @@ public:
       SetParameterOutputImage("out", m_TFFilter->GetOutput());
     }
   }
-  
 
-  void DoUpdateParameters()
-  {
-  }
 
-private:
+  void
+  DoUpdateParameters()
+  {}
 
+private:
   TFModelFilterType::Pointer   m_TFFilter;
   StreamingFilterType::Pointer m_StreamFilter;
   tensorflow::SavedModelBundle m_SavedModel; // must be alive during all the execution of the application !
@@ -344,7 +356,7 @@ private:
 
 }; // end of class
 
-} // namespace wrapper
+} // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelServe )
+OTB_APPLICATION_EXPORT(otb::Wrapper::TensorflowModelServe)
diff --git a/app/otbTensorflowModelTrain.cxx b/app/otbTensorflowModelTrain.cxx
index e7901998..f5a420a9 100644
--- a/app/otbTensorflowModelTrain.cxx
+++ b/app/otbTensorflowModelTrain.cxx
@@ -42,12 +42,11 @@ namespace Wrapper
 class TensorflowModelTrain : public Application
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowModelTrain                       Self;
-  typedef Application                                Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
+  typedef TensorflowModelTrain          Self;
+  typedef Application                   Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -77,8 +76,8 @@ public:
     std::string m_KeyInForValid;     // Key of input image list (validation)
     std::string m_KeyPHNameForTrain; // Key for placeholder name in the TensorFlow model (training)
     std::string m_KeyPHNameForValid; // Key for placeholder name in the TensorFlow model (validation)
-    std::string m_KeyPszX;   // Key for samples sizes X
-    std::string m_KeyPszY;   // Key for samples sizes Y
+    std::string m_KeyPszX;           // Key for samples sizes X
+    std::string m_KeyPszY;           // Key for samples sizes Y
   };
 
   /** Typedefs for the app */
@@ -86,9 +85,9 @@ public:
   typedef std::vector<FloatVectorImageType::SizeType> SizeList;
   typedef std::vector<std::string>                    StringList;
 
-  void DoUpdateParameters()
-  {
-  }
+  void
+  DoUpdateParameters()
+  {}
 
   //
   // Add an input source, which includes:
@@ -98,151 +97,161 @@ public:
   // -an input image placeholder (for validation)
   // -an input patchsize, which is the dimensions of samples. Same for training and validation.
   //
-  void AddAnInputImage()
+  void
+  AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-    std::stringstream ss_key_tr_group, ss_desc_tr_group,
-    ss_key_val_group, ss_desc_val_group,
-    ss_key_tr_in, ss_desc_tr_in,
-    ss_key_val_in, ss_desc_val_in,
-    ss_key_dims_x, ss_desc_dims_x,
-    ss_key_dims_y, ss_desc_dims_y,
-    ss_key_tr_ph, ss_desc_tr_ph,
-    ss_key_val_ph, ss_desc_val_ph;
+    std::stringstream ss_key_tr_group, ss_desc_tr_group, ss_key_val_group, ss_desc_val_group, ss_key_tr_in,
+      ss_desc_tr_in, ss_key_val_in, ss_desc_val_in, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y,
+      ss_key_tr_ph, ss_desc_tr_ph, ss_key_val_ph, ss_desc_val_ph;
 
     // Parameter group key/description
-    ss_key_tr_group   << "training.source"         << inputNumber;
-    ss_key_val_group  << "validation.source"       << inputNumber;
-    ss_desc_tr_group  << "Parameters for source #" << inputNumber << " (training)";
+    ss_key_tr_group << "training.source" << inputNumber;
+    ss_key_val_group << "validation.source" << inputNumber;
+    ss_desc_tr_group << "Parameters for source #" << inputNumber << " (training)";
     ss_desc_val_group << "Parameters for source #" << inputNumber << " (validation)";
 
     // Parameter group keys
-    ss_key_tr_in   << ss_key_tr_group.str()  << ".il";
-    ss_key_val_in  << ss_key_val_group.str() << ".il";
-    ss_key_dims_x  << ss_key_tr_group.str()  << ".patchsizex";
-    ss_key_dims_y  << ss_key_tr_group.str()  << ".patchsizey";
-    ss_key_tr_ph   << ss_key_tr_group.str()  << ".placeholder";
-    ss_key_val_ph  << ss_key_val_group.str() << ".name";
+    ss_key_tr_in << ss_key_tr_group.str() << ".il";
+    ss_key_val_in << ss_key_val_group.str() << ".il";
+    ss_key_dims_x << ss_key_tr_group.str() << ".patchsizex";
+    ss_key_dims_y << ss_key_tr_group.str() << ".patchsizey";
+    ss_key_tr_ph << ss_key_tr_group.str() << ".placeholder";
+    ss_key_val_ph << ss_key_val_group.str() << ".name";
 
     // Parameter group descriptions
-    ss_desc_tr_in  << "Input image (or list to stack) for source #" << inputNumber << " (training)";
+    ss_desc_tr_in << "Input image (or list to stack) for source #" << inputNumber << " (training)";
     ss_desc_val_in << "Input image (or list to stack) for source #" << inputNumber << " (validation)";
-    ss_desc_dims_x << "Patch size (x) for source #"                 << inputNumber;
-    ss_desc_dims_y << "Patch size (y) for source #"                 << inputNumber;
-    ss_desc_tr_ph  << "Name of the input placeholder for source #"  << inputNumber << " (training)";
+    ss_desc_dims_x << "Patch size (x) for source #" << inputNumber;
+    ss_desc_dims_y << "Patch size (y) for source #" << inputNumber;
+    ss_desc_tr_ph << "Name of the input placeholder for source #" << inputNumber << " (training)";
     ss_desc_val_ph << "Name of the input placeholder "
-        "or output tensor for source #"                             << inputNumber << " (validation)";
+                      "or output tensor for source #"
+                   << inputNumber << " (validation)";
 
     // Populate group
-    AddParameter(ParameterType_Group,          ss_key_tr_group.str(),  ss_desc_tr_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_tr_in.str(),     ss_desc_tr_in.str() );
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(),    ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(),    1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(),    ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(),    1);
-    AddParameter(ParameterType_String,         ss_key_tr_ph.str(),     ss_desc_tr_ph.str());
-    AddParameter(ParameterType_Group,          ss_key_val_group.str(), ss_desc_val_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_val_in.str(),    ss_desc_val_in.str() );
-    AddParameter(ParameterType_String,         ss_key_val_ph.str(),    ss_desc_val_ph.str());
+    AddParameter(ParameterType_Group, ss_key_tr_group.str(), ss_desc_tr_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_tr_in.str(), ss_desc_tr_in.str());
+    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
+    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
+    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
+    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
+    AddParameter(ParameterType_String, ss_key_tr_ph.str(), ss_desc_tr_ph.str());
+    AddParameter(ParameterType_Group, ss_key_val_group.str(), ss_desc_val_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_val_in.str(), ss_desc_val_in.str());
+    AddParameter(ParameterType_String, ss_key_val_ph.str(), ss_desc_val_ph.str());
 
     // Add a new bundle
     ProcessObjectsBundle bundle;
-    bundle.m_KeyInForTrain     = ss_key_tr_in.str();
-    bundle.m_KeyInForValid     = ss_key_val_in.str();
+    bundle.m_KeyInForTrain = ss_key_tr_in.str();
+    bundle.m_KeyInForValid = ss_key_val_in.str();
     bundle.m_KeyPHNameForTrain = ss_key_tr_ph.str();
     bundle.m_KeyPHNameForValid = ss_key_val_ph.str();
-    bundle.m_KeyPszX           = ss_key_dims_x.str();
-    bundle.m_KeyPszY           = ss_key_dims_y.str();
+    bundle.m_KeyPszX = ss_key_dims_x.str();
+    bundle.m_KeyPszY = ss_key_dims_y.str();
 
     m_Bundles.push_back(bundle);
   }
 
-  void DoInit()
+  void
+  DoInit()
   {
 
     // Documentation
     SetName("TensorflowModelTrain");
     SetDescription("Train a multisource deep learning net using Tensorflow. Change "
-        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
-        "sources.");
+                   "the " +
+                   tf::ENV_VAR_NAME_NSOURCES +
+                   " environment variable to set the number of "
+                   "sources.");
     SetDocLongDescription("The application trains a Tensorflow model over multiple data sources. "
-        "The number of input sources can be changed at runtime by setting the "
-        "system environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". "
-        "For each source, you have to set (1) the tensor placeholder name, as named in "
-        "the tensorflow model, (2) the patch size and (3) the image(s) source. ");
+                          "The number of input sources can be changed at runtime by setting the "
+                          "system environment variable " +
+                          tf::ENV_VAR_NAME_NSOURCES +
+                          ". "
+                          "For each source, you have to set (1) the tensor placeholder name, as named in "
+                          "the tensorflow model, (2) the patch size and (3) the image(s) source. ");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
     // Input model
-    AddParameter(ParameterType_Group,       "model",              "Model parameters");
-    AddParameter(ParameterType_Directory,   "model.dir",          "Tensorflow model_save directory");
-    MandatoryOn                            ("model.dir");
-    AddParameter(ParameterType_String,      "model.restorefrom",  "Restore model from path");
-    MandatoryOff                           ("model.restorefrom");
-    AddParameter(ParameterType_String,      "model.saveto",       "Save model to path");
-    MandatoryOff                           ("model.saveto");
-    AddParameter(ParameterType_StringList,  "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff                           ("model.tagsets");
+    AddParameter(ParameterType_Group, "model", "Model parameters");
+    AddParameter(ParameterType_Directory, "model.dir", "Tensorflow model_save directory");
+    MandatoryOn("model.dir");
+    AddParameter(ParameterType_String, "model.restorefrom", "Restore model from path");
+    MandatoryOff("model.restorefrom");
+    AddParameter(ParameterType_String, "model.saveto", "Save model to path");
+    MandatoryOff("model.saveto");
+    AddParameter(ParameterType_StringList,
+                 "model.tagsets",
+                 "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is "
+                 "supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
+    MandatoryOff("model.tagsets");
 
     // Training parameters group
-    AddParameter(ParameterType_Group,       "training",           "Training parameters");
-    AddParameter(ParameterType_Int,         "training.batchsize", "Batch size");
-    SetMinimumParameterIntValue            ("training.batchsize", 1);
-    SetDefaultParameterInt                 ("training.batchsize", 100);
-    AddParameter(ParameterType_Int,         "training.epochs",    "Number of epochs");
-    SetMinimumParameterIntValue            ("training.epochs",    1);
-    SetDefaultParameterInt                 ("training.epochs",    100);
-    AddParameter(ParameterType_StringList,  "training.userplaceholders",
+    AddParameter(ParameterType_Group, "training", "Training parameters");
+    AddParameter(ParameterType_Int, "training.batchsize", "Batch size");
+    SetMinimumParameterIntValue("training.batchsize", 1);
+    SetDefaultParameterInt("training.batchsize", 100);
+    AddParameter(ParameterType_Int, "training.epochs", "Number of epochs");
+    SetMinimumParameterIntValue("training.epochs", 1);
+    SetDefaultParameterInt("training.epochs", 100);
+    AddParameter(ParameterType_StringList,
+                 "training.userplaceholders",
                  "Additional single-valued placeholders for training. Supported types: int, float, bool.");
-    MandatoryOff                           ("training.userplaceholders");
-    AddParameter(ParameterType_StringList,  "training.targetnodes",    "Names of the target nodes");
-    MandatoryOn                            ("training.targetnodes");
-    AddParameter(ParameterType_StringList,  "training.outputtensors",  "Names of the output tensors to display");
-    MandatoryOff                           ("training.outputtensors");
-    AddParameter(ParameterType_Bool,        "training.usestreaming",   "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff                           ("training.usestreaming");
+    MandatoryOff("training.userplaceholders");
+    AddParameter(ParameterType_StringList, "training.targetnodes", "Names of the target nodes");
+    MandatoryOn("training.targetnodes");
+    AddParameter(ParameterType_StringList, "training.outputtensors", "Names of the output tensors to display");
+    MandatoryOff("training.outputtensors");
+    AddParameter(ParameterType_Bool,
+                 "training.usestreaming",
+                 "Use the streaming through patches (slower but can process big dataset)");
+    MandatoryOff("training.usestreaming");
 
     // Metrics
-    AddParameter(ParameterType_Group,       "validation",              "Validation parameters");
-    MandatoryOff                           ("validation");
-    AddParameter(ParameterType_Int,         "validation.step",         "Perform the validation every Nth epochs");
-    SetMinimumParameterIntValue            ("validation.step",         1);
-    SetDefaultParameterInt                 ("validation.step",         10);
-    AddParameter(ParameterType_Choice,      "validation.mode",         "Metrics to compute");
-    AddChoice                              ("validation.mode.none",    "No validation step");
-    AddChoice                              ("validation.mode.class",   "Classification metrics");
-    AddChoice                              ("validation.mode.rmse",    "Root mean square error");
-    AddParameter(ParameterType_StringList,  "validation.userplaceholders",
+    AddParameter(ParameterType_Group, "validation", "Validation parameters");
+    MandatoryOff("validation");
+    AddParameter(ParameterType_Int, "validation.step", "Perform the validation every Nth epochs");
+    SetMinimumParameterIntValue("validation.step", 1);
+    SetDefaultParameterInt("validation.step", 10);
+    AddParameter(ParameterType_Choice, "validation.mode", "Metrics to compute");
+    AddChoice("validation.mode.none", "No validation step");
+    AddChoice("validation.mode.class", "Classification metrics");
+    AddChoice("validation.mode.rmse", "Root mean square error");
+    AddParameter(ParameterType_StringList,
+                 "validation.userplaceholders",
                  "Additional single-valued placeholders for validation. Supported types: int, float, bool.");
-    MandatoryOff                           ("validation.userplaceholders");
-    AddParameter(ParameterType_Bool,        "validation.usestreaming", "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff                           ("validation.usestreaming");
+    MandatoryOff("validation.userplaceholders");
+    AddParameter(ParameterType_Bool,
+                 "validation.usestreaming",
+                 "Use the streaming through patches (slower but can process big dataset)");
+    MandatoryOff("validation.usestreaming");
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() + 1 ; i++) // +1 because we have at least 1 source more for training
-      {
+    for (int i = 1; i < tf::GetNumberOfSources() + 1; i++) // +1 because we have at least 1 source more for training
+    {
       AddAnInputImage();
-      }
+    }
 
     // Example
-    SetDocExampleParameterValue("source1.il",                "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder",       "x1");
-    SetDocExampleParameterValue("source1.patchsizex",        "16");
-    SetDocExampleParameterValue("source1.patchsizey",        "16");
-    SetDocExampleParameterValue("source2.il",                "labels.tif");
-    SetDocExampleParameterValue("source2.placeholder",       "y1");
-    SetDocExampleParameterValue("source2.patchsizex",        "1");
-    SetDocExampleParameterValue("source2.patchsizex",        "1");
-    SetDocExampleParameterValue("model.dir",                 "/tmp/my_saved_model/");
+    SetDocExampleParameterValue("source1.il", "spot6pms.tif");
+    SetDocExampleParameterValue("source1.placeholder", "x1");
+    SetDocExampleParameterValue("source1.patchsizex", "16");
+    SetDocExampleParameterValue("source1.patchsizey", "16");
+    SetDocExampleParameterValue("source2.il", "labels.tif");
+    SetDocExampleParameterValue("source2.placeholder", "y1");
+    SetDocExampleParameterValue("source2.patchsizex", "1");
+    SetDocExampleParameterValue("source2.patchsizex", "1");
+    SetDocExampleParameterValue("model.dir", "/tmp/my_saved_model/");
     SetDocExampleParameterValue("training.userplaceholders", "is_training=true dropout=0.2");
-    SetDocExampleParameterValue("training.targetnodes",      "optimizer");
-    SetDocExampleParameterValue("model.saveto",              "/tmp/my_saved_model/variables/variables");
-
+    SetDocExampleParameterValue("training.targetnodes", "optimizer");
+    SetDocExampleParameterValue("model.saveto", "/tmp/my_saved_model/variables/variables");
   }
 
   //
@@ -261,7 +270,8 @@ public:
   //       if we can keep trace of indices of sources for
   //       training / test / validation
   //
-  void PrepareInputs()
+  void
+  PrepareInputs()
   {
     // Clear placeholder names
     m_InputPlaceholdersForTraining.clear();
@@ -283,8 +293,8 @@ public:
 
 
     // Prepare the bundles
-    for (auto& bundle: m_Bundles)
-      {
+    for (auto & bundle : m_Bundles)
+    {
       // Source
       FloatVectorImageListType::Pointer trainStack = GetParameterImageList(bundle.m_KeyInForTrain);
       bundle.tfSource.Set(trainStack);
@@ -301,17 +311,17 @@ public:
       m_InputPatchesSizeForTraining.push_back(patchSize);
 
       otbAppLogINFO("New source:");
-      otbAppLogINFO("Patch size               : "<< patchSize);
-      otbAppLogINFO("Placeholder (training)   : "<< placeholderForTraining);
+      otbAppLogINFO("Patch size               : " << patchSize);
+      otbAppLogINFO("Placeholder (training)   : " << placeholderForTraining);
 
       // Prepare validation sources
       if (GetParameterInt("validation.mode") != 0)
-        {
+      {
         // Get the stack
         if (!HasValue(bundle.m_KeyInForValid))
-          {
+        {
           otbAppLogFATAL("No validation input is set for this source");
-          }
+        }
         FloatVectorImageListType::Pointer validStack = GetParameterImageList(bundle.m_KeyInForValid);
         bundle.tfSourceForValidation.Set(validStack);
 
@@ -319,12 +329,12 @@ public:
         // If yes, it means that its not an output tensor on which perform the validation
         std::string placeholderForValidation = GetParameterAsString(bundle.m_KeyPHNameForValid);
         if (placeholderForValidation.empty())
-          {
+        {
           placeholderForValidation = placeholderForTraining;
-          }
+        }
         // Same placeholder name ==> is a source for validation
         if (placeholderForValidation.compare(placeholderForTraining) == 0)
-          {
+        {
           // Source
           m_InputSourcesForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
           m_InputSourcesForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
@@ -335,12 +345,11 @@ public:
           // Patch size
           m_InputPatchesSizeForValidation.push_back(patchSize);
 
-          otbAppLogINFO("Placeholder (validation) : "<< placeholderForValidation);
-
-          }
+          otbAppLogINFO("Placeholder (validation) : " << placeholderForValidation);
+        }
         // Different placeholder ==> is a target to validate
         else
-          {
+        {
           // Source
           m_InputTargetsForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
           m_InputTargetsForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
@@ -351,51 +360,54 @@ public:
           // Patch size
           m_TargetPatchesSize.push_back(patchSize);
 
-          otbAppLogINFO("Tensor name (validation) : "<< placeholderForValidation);
-          }
-
+          otbAppLogINFO("Tensor name (validation) : " << placeholderForValidation);
         }
-
       }
+    }
   }
 
   //
   // Get user placeholders
   //
-  TrainModelFilterType::DictType GetUserPlaceholders(const std::string & key)
+  TrainModelFilterType::DictType
+  GetUserPlaceholders(const std::string & key)
   {
-    TrainModelFilterType::DictType dict;
+    TrainModelFilterType::DictType   dict;
     TrainModelFilterType::StringList expressions = GetParameterStringList(key);
-    for (auto& exp: expressions)
-      {
+    for (auto & exp : expressions)
+    {
       TrainModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
       dict.push_back(entry);
 
       otbAppLogINFO("Using placeholder " << entry.first << " with " << tf::PrintTensorInfos(entry.second));
-      }
+    }
     return dict;
   }
 
   //
   // Print some classification metrics
   //
-  void PrintClassificationMetrics(const ConfMatType & confMat, const MapOfClassesType & mapOfClassesRef)
+  void
+  PrintClassificationMetrics(const ConfMatType & confMat, const MapOfClassesType & mapOfClassesRef)
   {
     ConfusionMatrixCalculatorType::Pointer confMatMeasurements = ConfusionMatrixCalculatorType::New();
     confMatMeasurements->SetConfusionMatrix(confMat);
     confMatMeasurements->SetMapOfClasses(mapOfClassesRef);
     confMatMeasurements->Compute();
 
-    for (auto const& itMapOfClassesRef : mapOfClassesRef)
-      {
+    for (auto const & itMapOfClassesRef : mapOfClassesRef)
+    {
       LabelValueType labelRef = itMapOfClassesRef.first;
       LabelValueType indexLabelRef = itMapOfClassesRef.second;
 
-      otbAppLogINFO("Precision of class [" << labelRef << "] vs all: " << confMatMeasurements->GetPrecisions()[indexLabelRef]);
-      otbAppLogINFO("Recall of class [" << labelRef << "] vs all: " << confMatMeasurements->GetRecalls()[indexLabelRef]);
-      otbAppLogINFO("F-score of class [" << labelRef << "] vs all: " << confMatMeasurements->GetFScores()[indexLabelRef]);
+      otbAppLogINFO("Precision of class [" << labelRef
+                                           << "] vs all: " << confMatMeasurements->GetPrecisions()[indexLabelRef]);
+      otbAppLogINFO("Recall of class [" << labelRef
+                                        << "] vs all: " << confMatMeasurements->GetRecalls()[indexLabelRef]);
+      otbAppLogINFO("F-score of class [" << labelRef
+                                         << "] vs all: " << confMatMeasurements->GetFScores()[indexLabelRef]);
       otbAppLogINFO("\t");
-      }
+    }
     otbAppLogINFO("Precision of the different classes: " << confMatMeasurements->GetPrecisions());
     otbAppLogINFO("Recall of the different classes: " << confMatMeasurements->GetRecalls());
     otbAppLogINFO("F-score of the different classes: " << confMatMeasurements->GetFScores());
@@ -405,7 +417,8 @@ public:
     otbAppLogINFO("Confusion matrix:\n" << confMat);
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
 
     // Load the Tensorflow bundle
@@ -413,13 +426,13 @@ public:
 
     // Check if we have to restore variables from somewhere else
     if (HasValue("model.restorefrom"))
-      {
+    {
       const std::string path = GetParameterAsString("model.restorefrom");
       otbAppLogINFO("Restoring model from " + path);
 
       // Load SavedModel variables
       tf::RestoreModel(path, m_SavedModel);
-      }
+    }
 
     // Prepare inputs
     PrepareInputs();
@@ -434,18 +447,16 @@ public:
     m_TrainModelFilter->SetUseStreaming(GetParameterInt("training.usestreaming"));
 
     // Set inputs
-    for (unsigned int i = 0 ; i < m_InputSourcesForTraining.size() ; i++)
-      {
+    for (unsigned int i = 0; i < m_InputSourcesForTraining.size(); i++)
+    {
       m_TrainModelFilter->PushBackInputTensorBundle(
-          m_InputPlaceholdersForTraining[i],
-          m_InputPatchesSizeForTraining[i],
-          m_InputSourcesForTraining[i]);
-      }
+        m_InputPlaceholdersForTraining[i], m_InputPatchesSizeForTraining[i], m_InputSourcesForTraining[i]);
+    }
 
     // Setup the validation filter
     const bool do_validation = HasUserValue("validation.mode");
-    if (GetParameterInt("validation.mode")==1) // class
-      {
+    if (GetParameterInt("validation.mode") == 1) // class
+    {
       otbAppLogINFO("Set validation mode to classification validation");
 
       m_ValidateModelFilter = ValidateModelFilterType::New();
@@ -456,18 +467,18 @@ public:
       m_ValidateModelFilter->SetInputReceptiveFields(m_InputPatchesSizeForValidation);
       m_ValidateModelFilter->SetOutputTensors(m_TargetTensorsNames);
       m_ValidateModelFilter->SetOutputExpressionFields(m_TargetPatchesSize);
-      }
-    else if (GetParameterInt("validation.mode")==2) // rmse)
-      {
+    }
+    else if (GetParameterInt("validation.mode") == 2) // rmse)
+    {
       otbAppLogINFO("Set validation mode to classification RMSE evaluation");
       otbAppLogFATAL("Not implemented yet !"); // XD
 
       // TODO
-      }
+    }
 
     // Epoch
-    for (int epoch = 1 ; epoch <= GetParameterInt("training.epochs") ; epoch++)
-      {
+    for (int epoch = 1; epoch <= GetParameterInt("training.epochs"); epoch++)
+    {
       // Train the model
       AddProcess(m_TrainModelFilter, "Training epoch #" + std::to_string(epoch));
       m_TrainModelFilter->Update();
@@ -479,7 +490,7 @@ public:
         {
           // 1. Evaluate the metrics against the learning data
 
-          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstLearningData.size() ; i++)
+          for (unsigned int i = 0; i < m_InputSourcesForEvaluationAgainstLearningData.size(); i++)
           {
             m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstLearningData[i]);
           }
@@ -492,16 +503,17 @@ public:
           AddProcess(m_ValidateModelFilter, "Evaluate model (Learning data)");
           m_ValidateModelFilter->Update();
 
-          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
+          for (unsigned int i = 0; i < m_TargetTensorsNames.size(); i++)
           {
             otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
+            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i),
+                                       m_ValidateModelFilter->GetMapOfClasses(i));
           }
 
           // 2. Evaluate the metrics against the validation data
 
           // Here we just change the input sources and references
-          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstValidationData.size() ; i++)
+          for (unsigned int i = 0; i < m_InputSourcesForEvaluationAgainstValidationData.size(); i++)
           {
             m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstValidationData[i]);
           }
@@ -512,29 +524,28 @@ public:
           AddProcess(m_ValidateModelFilter, "Evaluate model (Validation data)");
           m_ValidateModelFilter->Update();
 
-          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
+          for (unsigned int i = 0; i < m_TargetTensorsNames.size(); i++)
           {
             otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
+            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i),
+                                       m_ValidateModelFilter->GetMapOfClasses(i));
           }
         } // Step is OK to perform validation
-      } // Do the validation against the validation data
+      }   // Do the validation against the validation data
 
-      } // Next epoch
+    } // Next epoch
 
     // Check if we have to save variables to somewhere
     if (HasValue("model.saveto"))
-      {
+    {
       const std::string path = GetParameterAsString("model.saveto");
       otbAppLogINFO("Saving model to " + path);
       tf::SaveModel(path, m_SavedModel);
-      }
-
+    }
   }
 
 private:
-
-  tensorflow::SavedModelBundle     m_SavedModel; // must be alive during all the execution of the application !
+  tensorflow::SavedModelBundle m_SavedModel; // must be alive during all the execution of the application !
 
   // Filters
   TrainModelFilterType::Pointer    m_TrainModelFilter;
@@ -544,9 +555,9 @@ private:
   BundleList m_Bundles;
 
   // Patches size
-  SizeList   m_InputPatchesSizeForTraining;
-  SizeList   m_InputPatchesSizeForValidation;
-  SizeList   m_TargetPatchesSize;
+  SizeList m_InputPatchesSizeForTraining;
+  SizeList m_InputPatchesSizeForValidation;
+  SizeList m_TargetPatchesSize;
 
   // Placeholders and Tensors names
   StringList m_InputPlaceholdersForTraining;
@@ -562,7 +573,7 @@ private:
 
 }; // end of class
 
-} // namespace wrapper
+} // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelTrain )
+OTB_APPLICATION_EXPORT(otb::Wrapper::TensorflowModelTrain)
diff --git a/app/otbTrainClassifierFromDeepFeatures.cxx b/app/otbTrainClassifierFromDeepFeatures.cxx
index 39ac4189..cc3ec9ed 100644
--- a/app/otbTrainClassifierFromDeepFeatures.cxx
+++ b/app/otbTrainClassifierFromDeepFeatures.cxx
@@ -34,23 +34,23 @@ class TrainClassifierFromDeepFeatures : public CompositeApplication
 {
 public:
   /** Standard class typedefs. */
-  typedef TrainClassifierFromDeepFeatures              Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
+  typedef TrainClassifierFromDeepFeatures Self;
+  typedef Application                     Superclass;
+  typedef itk::SmartPointer<Self>         Pointer;
+  typedef itk::SmartPointer<const Self>   ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(TrainClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
 
 private:
-
   //
   // Add an input source, which includes:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void AddAnInputImage(int inputNumber = 0)
+  void
+  AddAnInputImage(int inputNumber = 0)
   {
     inputNumber++;
 
@@ -61,70 +61,83 @@ private:
 
     // Populate group
     ShareParameter(ss_key_group.str(), "tfmodel." + ss_key_group.str(), ss_desc_group.str());
-
   }
 
-  void DoInit()
+  void
+  DoInit()
   {
 
-  SetName("TrainClassifierFromDeepFeatures");
-  SetDescription("Train a classifier from deep net based features of an image and training vector data.");
-
-  // Documentation
-  SetDocLongDescription("See TrainImagesClassifier application");
-  SetDocLimitations("None");
-  SetDocAuthors("Remi Cresson");
-  SetDocSeeAlso(" ");
-
-  AddDocTag(Tags::Learning);
-
-  ClearApplications();
-
-  // Add applications
-  AddApplication("TrainImagesClassifier",  "train",   "Train images classifier");
-  AddApplication("TensorflowModelServe",   "tfmodel", "Serve the TF model");
-
-  // Model shared parameters
-  AddAnInputImage();
-  for (int i = 1; i < tf::GetNumberOfSources() ; i++)
-  {
-    AddAnInputImage(i);
+    SetName("TrainClassifierFromDeepFeatures");
+    SetDescription("Train a classifier from deep net based features of an image and training vector data.");
+
+    // Documentation
+    SetDocLongDescription("See TrainImagesClassifier application");
+    SetDocLimitations("None");
+    SetDocAuthors("Remi Cresson");
+    SetDocSeeAlso(" ");
+
+    AddDocTag(Tags::Learning);
+
+    ClearApplications();
+
+    // Add applications
+    AddApplication("TrainImagesClassifier", "train", "Train images classifier");
+    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model");
+
+    // Model shared parameters
+    AddAnInputImage();
+    for (int i = 1; i < tf::GetNumberOfSources(); i++)
+    {
+      AddAnInputImage(i);
+    }
+    ShareParameter("model",
+                   "tfmodel.model",
+                   "Deep net inputs parameters",
+                   "Parameters of the deep net inputs: placeholder names, receptive fields, etc.");
+    ShareParameter("output",
+                   "tfmodel.output",
+                   "Deep net outputs parameters",
+                   "Parameters of the deep net outputs: tensors names, expression fields, etc.");
+    ShareParameter("optim",
+                   "tfmodel.optim",
+                   "Processing time optimization",
+                   "This group of parameters allows optimization of processing time");
+
+    // Train shared parameters
+    ShareParameter("ram", "train.ram", "Available RAM (Mb)", "Available RAM (Mb)");
+    ShareParameter("vd", "train.io.vd", "Vector data for training", "Input vector data for training");
+    ShareParameter("valid", "train.io.valid", "Vector data for validation", "Input vector data for validation");
+    ShareParameter("out", "train.io.out", "Output classification model", "Output classification model");
+    ShareParameter("confmatout",
+                   "train.io.confmatout",
+                   "Output confusion matrix",
+                   "Output confusion matrix of the classification model");
+
+    // Shared parameter groups
+    ShareParameter("sample", "train.sample", "Sampling parameters", "Training and validation samples parameters");
+    ShareParameter("elev", "train.elev", "Elevation parameters", "Elevation parameters");
+    ShareParameter("classifier", "train.classifier", "Classifier parameters", "Classifier parameters");
+    ShareParameter("rand", "train.rand", "User defined random seed", "User defined random seed");
   }
-  ShareParameter("model",      "tfmodel.model",       "Deep net inputs parameters",   "Parameters of the deep net inputs: placeholder names, receptive fields, etc.");
-  ShareParameter("output",     "tfmodel.output",      "Deep net outputs parameters",  "Parameters of the deep net outputs: tensors names, expression fields, etc.");
-  ShareParameter("optim",      "tfmodel.optim",       "Processing time optimization", "This group of parameters allows optimization of processing time");
-
-  // Train shared parameters
-  ShareParameter("ram",        "train.ram",           "Available RAM (Mb)",           "Available RAM (Mb)");
-  ShareParameter("vd",         "train.io.vd",         "Vector data for training",     "Input vector data for training");
-  ShareParameter("valid",      "train.io.valid",      "Vector data for validation",   "Input vector data for validation");
-  ShareParameter("out",        "train.io.out",        "Output classification model",  "Output classification model");
-  ShareParameter("confmatout", "train.io.confmatout", "Output confusion matrix",      "Output confusion matrix of the classification model");
-
-  // Shared parameter groups
-  ShareParameter("sample",     "train.sample",        "Sampling parameters" ,         "Training and validation samples parameters" );
-  ShareParameter("elev",       "train.elev",          "Elevation parameters",         "Elevation parameters" );
-  ShareParameter("classifier", "train.classifier",    "Classifier parameters",        "Classifier parameters" );
-  ShareParameter("rand",       "train.rand",          "User defined random seed",     "User defined random seed" );
 
-  }
 
-
-  void DoUpdateParameters()
+  void
+  DoUpdateParameters()
   {
     UpdateInternalParameters("train");
   }
 
-  void DoExecute()
+  void
+  DoExecute()
   {
     ExecuteInternal("tfmodel");
-    GetInternalApplication("train")->AddImageToParameterInputImageList("io.il", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
+    GetInternalApplication("train")->AddImageToParameterInputImageList(
+      "io.il", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
     UpdateInternalParameters("train");
     ExecuteInternal("train");
   }
-
 };
 } // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT( otb::Wrapper::TrainClassifierFromDeepFeatures )
+OTB_APPLICATION_EXPORT(otb::Wrapper::TrainClassifierFromDeepFeatures)
diff --git a/include/otbTensorflowCommon.cxx b/include/otbTensorflowCommon.cxx
index 662c9d3e..b7a27c60 100644
--- a/include/otbTensorflowCommon.cxx
+++ b/include/otbTensorflowCommon.cxx
@@ -11,8 +11,10 @@
 =========================================================================*/
 #include "otbTensorflowCommon.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // Environment variable for the number of sources in "Multisource" applications
@@ -22,21 +24,21 @@ const std::string ENV_VAR_NAME_NSOURCES = "OTB_TF_NSOURCES";
 //
 // Get the environment variable as int
 //
-int GetEnvironmentVariableAsInt(const std::string & variableName)
+int
+GetEnvironmentVariableAsInt(const std::string & variableName)
 {
-  int ret = -1;
-  char const* tmp = getenv( variableName.c_str() );
-  if ( tmp != NULL )
+  int          ret = -1;
+  char const * tmp = getenv(variableName.c_str());
+  if (tmp != NULL)
   {
-    std::string s( tmp );
+    std::string s(tmp);
     try
     {
       ret = std::stoi(s);
     }
-    catch(...)
+    catch (...)
     {
-      itkGenericExceptionMacro("Error parsing variable "
-          << variableName << " as integer. Value is " << s);
+      itkGenericExceptionMacro("Error parsing variable " << variableName << " as integer. Value is " << s);
     }
   }
 
@@ -47,7 +49,8 @@ int GetEnvironmentVariableAsInt(const std::string & variableName)
 // This function returns the numeric content of the ENV_VAR_NAME_NSOURCES
 // environment variable
 //
-int GetNumberOfSources()
+int
+GetNumberOfSources()
 {
   int ret = GetEnvironmentVariableAsInt(ENV_VAR_NAME_NSOURCES);
   if (ret != -1)
@@ -60,15 +63,18 @@ int GetNumberOfSources()
 //
 // This function copy a patch from an input image to an output image
 //
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize)
 {
-  typename TImage::RegionType inputPatchRegion(inputPatchIndex, patchSize);
-  typename TImage::RegionType outputPatchRegion(outputPatchIndex, patchSize);
-  typename itk::ImageRegionConstIterator<TImage> inIt (inputImg, inputPatchRegion);
-  typename itk::ImageRegionIterator<TImage> outIt (outputImg, outputPatchRegion);
+  typename TImage::RegionType                    inputPatchRegion(inputPatchIndex, patchSize);
+  typename TImage::RegionType                    outputPatchRegion(outputPatchIndex, patchSize);
+  typename itk::ImageRegionConstIterator<TImage> inIt(inputImg, inputPatchRegion);
+  typename itk::ImageRegionIterator<TImage>      outIt(outputImg, outputPatchRegion);
   for (inIt.GoToBegin(), outIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++outIt)
   {
     outIt.Set(inIt.Get());
@@ -78,9 +84,9 @@ void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & i
 //
 // Get image infos
 //
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands)
 {
   nBands = image->GetNumberOfComponentsPerPixel();
   sizex = image->GetLargestPossibleRegion().GetSize(0);
@@ -90,8 +96,9 @@ void GetImageInfo(typename TImage::Pointer image,
 //
 // Propagate the requested region in the image
 //
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region)
 {
   image->SetRequestedRegion(region);
   image->PropagateRequestedRegion();
@@ -101,13 +108,16 @@ void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::R
 //
 // Sample an input image at the specified location
 //
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize)
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize)
 {
   typename TImage::IndexType index, outIndex;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     outIndex[0] = 0;
@@ -128,7 +138,6 @@ bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer
     }
   }
   return false;
-
 }
 
 } // end namespace tf
diff --git a/include/otbTensorflowCommon.h b/include/otbTensorflowCommon.h
index fbd72810..a012173c 100644
--- a/include/otbTensorflowCommon.h
+++ b/include/otbTensorflowCommon.h
@@ -22,38 +22,49 @@
 #include "itkImageRegionConstIterator.h"
 #include "itkImageRegionIterator.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Environment variable for the number of sources in "Multisource" applications
 extern const std::string ENV_VAR_NAME_NSOURCES;
 
 // Get the environment variable as int
-int GetEnvironmentVariableAsInt(const std::string & variableName);
+int
+GetEnvironmentVariableAsInt(const std::string & variableName);
 
 // Get the value (as int) of the environment variable ENV_VAR_NAME_NSOURCES
-int GetNumberOfSources();
+int
+GetNumberOfSources();
 
 // This function copy a patch from an input image to an output image
-template<class TImage>
-void CopyPatch(typename TImage::Pointer inputImg, typename TImage::IndexType & inputPatchIndex,
-    typename TImage::Pointer outputImg, typename TImage::IndexType & outputPatchIndex,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+void
+CopyPatch(typename TImage::Pointer     inputImg,
+          typename TImage::IndexType & inputPatchIndex,
+          typename TImage::Pointer     outputImg,
+          typename TImage::IndexType & outputPatchIndex,
+          typename TImage::SizeType    patchSize);
 
 // Get image infos
-template<class TImage>
-void GetImageInfo(typename TImage::Pointer image,
-    unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
+template <class TImage>
+void
+GetImageInfo(typename TImage::Pointer image, unsigned int & sizex, unsigned int & sizey, unsigned int & nBands);
 
 // Propagate the requested region in the image
-template<class TImage>
-void PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
+template <class TImage>
+void
+PropagateRequestedRegion(typename TImage::Pointer image, typename TImage::RegionType & region);
 
 // Sample an input image at the specified location
-template<class TImage>
-bool SampleImage(const typename TImage::Pointer inPtr, typename TImage::Pointer outPtr,
-    typename TImage::PointType point, unsigned int elemIdx,
-    typename TImage::SizeType patchSize);
+template <class TImage>
+bool
+SampleImage(const typename TImage::Pointer inPtr,
+            typename TImage::Pointer       outPtr,
+            typename TImage::PointType     point,
+            unsigned int                   elemIdx,
+            typename TImage::SizeType      patchSize);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowCopyUtils.cxx b/include/otbTensorflowCopyUtils.cxx
index e9690511..b2a6e70e 100644
--- a/include/otbTensorflowCopyUtils.cxx
+++ b/include/otbTensorflowCopyUtils.cxx
@@ -218,9 +218,10 @@ CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
   const tensorflow::int64 nElmI = bufferRegion.GetNumberOfPixels() * outputDimSize_C;
   if (nElmI != nElmT)
   {
-    itkGenericExceptionMacro("Number of elements in the tensor is " << nElmT 
-                             << " but image outputRegion has " << nElmI << " values to fill.\n"
-                             << "Buffer region is: \n" << bufferRegion << "\n"
+    itkGenericExceptionMacro("Number of elements in the tensor is "
+                             << nElmT << " but image outputRegion has " << nElmI << " values to fill.\n"
+                             << "Buffer region is: \n"
+                             << bufferRegion << "\n"
                              << "Number of components in the output image: " << outputDimSize_C << "\n"
                              << "Tensor shape: " << PrintTensorShape(tensor.shape()) << "\n"
                              << "Please check the input(s) field of view (FOV), "
@@ -347,7 +348,7 @@ ValueToTensor(std::string value)
   }
 
   // Create tensor
-  tensorflow::TensorShape shape({values.size()});
+  tensorflow::TensorShape shape({ values.size() });
   tensorflow::Tensor      out(tensorflow::DT_BOOL, shape);
   if (is_digit)
   {
@@ -409,7 +410,7 @@ ValueToTensor(std::string value)
     }
     idx++;
   }
-  otbLogMacro(Debug,  << "Returning tensor: "<< out.DebugString());
+  otbLogMacro(Debug, << "Returning tensor: " << out.DebugString());
 
   return out;
 }
diff --git a/include/otbTensorflowCopyUtils.h b/include/otbTensorflowCopyUtils.h
index 17458791..59e1a744 100644
--- a/include/otbTensorflowCopyUtils.h
+++ b/include/otbTensorflowCopyUtils.h
@@ -34,57 +34,94 @@
 #include <string>
 #include <regex>
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Generate a string with TensorShape infos
-std::string PrintTensorShape(const tensorflow::TensorShape & shp);
+std::string
+PrintTensorShape(const tensorflow::TensorShape & shp);
 
 // Generate a string with tensor infos
-std::string PrintTensorInfos(const tensorflow::Tensor & tensor);
+std::string
+PrintTensorInfos(const tensorflow::Tensor & tensor);
 
 // Create a tensor with the good datatype
-template<class TImage>
-tensorflow::Tensor CreateTensor(tensorflow::TensorShape & shape);
+template <class TImage>
+tensorflow::Tensor
+CreateTensor(tensorflow::TensorShape & shape);
 
 // Populate a tensor with the buffered region of a vector image
-template<class TImage>
-void PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
+template <class TImage>
+void
+PopulateTensorFromBufferedVectorImage(const typename TImage::Pointer bufferedimagePtr, tensorflow::Tensor & out_tensor);
 
 // Populate the buffered region of a vector image with a given tensor's values
-template<class TImage>
-void TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
+template <class TImage>
+void
+TensorToImageBuffer(const tensorflow::Tensor & tensor, typename TImage::Pointer & image);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor ({-1, sz_y, sz_x, sz_bands})
-template<class TImage, class TValueType=typename TImage::InternalPixelType>
-void RecopyImageRegionToTensor(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage, class TValueType = typename TImage::InternalPixelType>
+void
+RecopyImageRegionToTensor(const typename TImage::Pointer      inputPtr,
+                          const typename TImage::RegionType & region,
+                          tensorflow::Tensor &                tensor,
+                          unsigned int                        elemIdx);
 
 // Recopy an VectorImage region into a 4D-shaped tensorflow::Tensor (TValueType-agnostic function)
-template<class TImage>
-void RecopyImageRegionToTensorWithCast(const typename TImage::Pointer inputPtr,  const typename TImage::RegionType & region, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+RecopyImageRegionToTensorWithCast(const typename TImage::Pointer      inputPtr,
+                                  const typename TImage::RegionType & region,
+                                  tensorflow::Tensor &                tensor,
+                                  unsigned int                        elemIdx);
 
 // Sample a centered patch
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::IndexType & centerIndex, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
-template<class TImage>
-void SampleCenteredPatch(const typename TImage::Pointer inputPtr, const typename TImage::PointType & centerCoord, const typename TImage::SizeType & patchSize, tensorflow::Tensor & tensor, unsigned int elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::IndexType & centerIndex,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
+template <class TImage>
+void
+SampleCenteredPatch(const typename TImage::Pointer     inputPtr,
+                    const typename TImage::PointType & centerCoord,
+                    const typename TImage::SizeType &  patchSize,
+                    tensorflow::Tensor &               tensor,
+                    unsigned int                       elemIdx);
 
 // Return the number of channels from the TensorflowShapeProto
-tensorflow::int64 GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
+tensorflow::int64
+GetNumberOfChannelsFromShapeProto(const tensorflow::TensorShapeProto & proto);
 
 // Copy a tensor into the image region
-template<class TImage, class TValueType>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, typename TImage::Pointer outputPtr, const typename TImage::RegionType & region, int & channelOffset);
+template <class TImage, class TValueType>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & region,
+                        int &                               channelOffset);
 
 // Copy a tensor into the image region (TValueType-agnostic version)
-template<class TImage>
-void CopyTensorToImageRegion(const tensorflow::Tensor & tensor, const typename TImage::RegionType & bufferRegion, typename TImage::Pointer outputPtr, const typename TImage::RegionType & outputRegion, int & channelOffset);
+template <class TImage>
+void
+CopyTensorToImageRegion(const tensorflow::Tensor &          tensor,
+                        const typename TImage::RegionType & bufferRegion,
+                        typename TImage::Pointer            outputPtr,
+                        const typename TImage::RegionType & outputRegion,
+                        int &                               channelOffset);
 
 // Convert a value into a tensor
-tensorflow::Tensor ValueToTensor(std::string value);
+tensorflow::Tensor
+ValueToTensor(std::string value);
 
 // Convert an expression into a dict
-std::pair<std::string, tensorflow::Tensor> ExpressionToTensor(std::string expression);
+std::pair<std::string, tensorflow::Tensor>
+ExpressionToTensor(std::string expression);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowDataTypeBridge.cxx b/include/otbTensorflowDataTypeBridge.cxx
index a510cb4e..71fcd8c6 100644
--- a/include/otbTensorflowDataTypeBridge.cxx
+++ b/include/otbTensorflowDataTypeBridge.cxx
@@ -11,14 +11,17 @@
 =========================================================================*/
 #include "otbTensorflowDataTypeBridge.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 //
 // returns the datatype used by tensorflow
 //
-template<class Type>
-tensorflow::DataType GetTensorflowDataType()
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType()
 {
   if (typeid(Type) == typeid(bool))
   {
@@ -74,8 +77,9 @@ tensorflow::DataType GetTensorflowDataType()
 //
 // Return true if the tensor data type is correct
 //
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor)
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor)
 {
   return GetTensorflowDataType<Type>() == tensor.dtype();
 }
@@ -83,7 +87,8 @@ bool HasSameDataType(const tensorflow::Tensor & tensor)
 //
 // Return the datatype as string
 //
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt)
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt)
 {
   return tensorflow::DataTypeString(dt);
 }
diff --git a/include/otbTensorflowDataTypeBridge.h b/include/otbTensorflowDataTypeBridge.h
index af6be18d..e815dafc 100644
--- a/include/otbTensorflowDataTypeBridge.h
+++ b/include/otbTensorflowDataTypeBridge.h
@@ -16,19 +16,24 @@
 #include "tensorflow/core/framework/types.h"
 #include "tensorflow/core/framework/tensor.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // returns the datatype used by tensorflow
-template<class Type>
-tensorflow::DataType GetTensorflowDataType();
+template <class Type>
+tensorflow::DataType
+GetTensorflowDataType();
 
 // Return true if the tensor data type is correct
-template<class Type>
-bool HasSameDataType(const tensorflow::Tensor & tensor);
+template <class Type>
+bool
+HasSameDataType(const tensorflow::Tensor & tensor);
 
 // Return datatype as string
-tensorflow::string GetDataTypeAsString(tensorflow::DataType dt);
+tensorflow::string
+GetDataTypeAsString(tensorflow::DataType dt);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowGraphOperations.cxx b/include/otbTensorflowGraphOperations.cxx
index d40c4da6..8c914340 100644
--- a/include/otbTensorflowGraphOperations.cxx
+++ b/include/otbTensorflowGraphOperations.cxx
@@ -109,7 +109,7 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
 
   // Next, we fill layerNames
   int k = 0; // counter used for tensorsNames
-  for (auto const & name: tensorsNames)
+  for (auto const & name : tensorsNames)
   {
     bool                   found = false;
     tensorflow::TensorInfo tensor_info;
@@ -143,7 +143,7 @@ GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::Ten
           otbLogMacro(Debug, << "Found: " << layer.second.name() << " in the model");
         }
       } // next layer
-    } // end else
+    }   // end else
 
     k += 1;
 
@@ -178,7 +178,7 @@ PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::s
     tensorflow::NodeDef node = graph.node(i);
     std::cout << i << "\t" << node.name() << std::endl;
 
-    for (auto const & name: nodesNames)
+    for (auto const & name : nodesNames)
     {
       if (node.name().compare(name) == 0)
       {
diff --git a/include/otbTensorflowGraphOperations.h b/include/otbTensorflowGraphOperations.h
index 6ad4a4e2..b2495086 100644
--- a/include/otbTensorflowGraphOperations.h
+++ b/include/otbTensorflowGraphOperations.h
@@ -27,27 +27,36 @@
 // OTB log
 #include "otbMacro.h"
 
-namespace otb {
-namespace tf {
+namespace otb
+{
+namespace tf
+{
 
 // Load SavedModel variables
-void RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+RestoreModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Save SavedModel variables
-void SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
+void
+SaveModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle);
 
 // Load SavedModel
-void LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
+void
+LoadModel(const tensorflow::tstring path, tensorflow::SavedModelBundle & bundle, std::vector<std::string> tagList);
 
 // Get the following attributes of the specified tensors (by name) of a graph:
 // - shape
 // - datatype
 // Here we assume that the node's output is a tensor
-void GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers, std::vector<std::string> & tensorsNames,
-    std::vector<tensorflow::TensorShapeProto> & shapes, std::vector<tensorflow::DataType> & dataTypes);
+void
+GetTensorAttributes(const tensorflow::protobuf::Map<std::string, tensorflow::TensorInfo> layers,
+                    std::vector<std::string> &                                           tensorsNames,
+                    std::vector<tensorflow::TensorShapeProto> &                          shapes,
+                    std::vector<tensorflow::DataType> &                                  dataTypes);
 
 // Print a lot of stuff about the specified nodes of the graph
-void PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
+void
+PrintNodeAttributes(const tensorflow::GraphDef & graph, const std::vector<std::string> & nodesNames);
 
 } // end namespace tf
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelBase.h b/include/otbTensorflowMultisourceModelBase.h
index d10648ea..a1418b0c 100644
--- a/include/otbTensorflowMultisourceModelBase.h
+++ b/include/otbTensorflowMultisourceModelBase.h
@@ -65,34 +65,32 @@ namespace otb
  *
  * \ingroup OTBTensorflow
  */
-template <class TInputImage, class TOutputImage=TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelBase :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+template <class TInputImage, class TOutputImage = TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelBase : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelBase             Self;
+  typedef TensorflowMultisourceModelBase                     Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
+  typedef itk::SmartPointer<Self>                            Pointer;
+  typedef itk::SmartPointer<const Self>                      ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelBase, itk::ImageToImageFilter);
 
   /** Images typedefs */
-  typedef TInputImage                                ImageType;
-  typedef typename TInputImage::Pointer              ImagePointerType;
-  typedef typename TInputImage::PixelType            PixelType;
-  typedef typename TInputImage::InternalPixelType    InternalPixelType;
-  typedef typename TInputImage::IndexType            IndexType;
-  typedef typename TInputImage::IndexValueType       IndexValueType;
-  typedef typename TInputImage::PointType            PointType;
-  typedef typename TInputImage::SizeType             SizeType;
-  typedef typename TInputImage::SizeValueType        SizeValueType;
-  typedef typename TInputImage::SpacingType          SpacingType;
-  typedef typename TInputImage::RegionType           RegionType;
+  typedef TInputImage                             ImageType;
+  typedef typename TInputImage::Pointer           ImagePointerType;
+  typedef typename TInputImage::PixelType         PixelType;
+  typedef typename TInputImage::InternalPixelType InternalPixelType;
+  typedef typename TInputImage::IndexType         IndexType;
+  typedef typename TInputImage::IndexValueType    IndexValueType;
+  typedef typename TInputImage::PointType         PointType;
+  typedef typename TInputImage::SizeType          SizeType;
+  typedef typename TInputImage::SizeValueType     SizeValueType;
+  typedef typename TInputImage::SpacingType       SpacingType;
+  typedef typename TInputImage::RegionType        RegionType;
 
   /** Typedefs for parameters */
   typedef std::pair<std::string, tensorflow::Tensor> DictElementType;
@@ -104,15 +102,26 @@ public:
   typedef std::vector<tensorflow::Tensor>            TensorListType;
 
   /** Set and Get the Tensorflow session and graph */
-  void SetSavedModel(tensorflow::SavedModelBundle * saved_model) {m_SavedModel = saved_model;}
-  tensorflow::SavedModelBundle * GetSavedModel() {return m_SavedModel;}
+  void
+  SetSavedModel(tensorflow::SavedModelBundle * saved_model)
+  {
+    m_SavedModel = saved_model;
+  }
+  tensorflow::SavedModelBundle *
+  GetSavedModel()
+  {
+    return m_SavedModel;
+  }
 
   /** Get the SignatureDef */
-  tensorflow::SignatureDef GetSignatureDef();
+  tensorflow::SignatureDef
+  GetSignatureDef();
 
   /** Model parameters */
-  void PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
-  void PushBackOuputTensorBundle(std::string name, SizeType expressionField);
+  void
+  PushBackInputTensorBundle(std::string name, SizeType receptiveField, ImagePointerType image);
+  void
+  PushBackOuputTensorBundle(std::string name, SizeType expressionField);
 
   /** Input placeholders names */
   itkSetMacro(InputPlaceholders, StringList);
@@ -131,8 +140,16 @@ public:
   itkGetMacro(OutputExpressionFields, SizeListType);
 
   /** User placeholders */
-  void SetUserPlaceholders(const DictType & dict) {m_UserPlaceholders = dict;}
-  DictType GetUserPlaceholders() {return m_UserPlaceholders;}
+  void
+  SetUserPlaceholders(const DictType & dict)
+  {
+    m_UserPlaceholders = dict;
+  }
+  DictType
+  GetUserPlaceholders()
+  {
+    return m_UserPlaceholders;
+  }
 
   /** Target nodes names */
   itkSetMacro(TargetNodesNames, StringList);
@@ -144,40 +161,44 @@ public:
   itkGetMacro(InputTensorsShapes, TensorShapeProtoList);
   itkGetMacro(OutputTensorsShapes, TensorShapeProtoList);
 
-  virtual void GenerateOutputInformation();
+  virtual void
+  GenerateOutputInformation();
 
 protected:
   TensorflowMultisourceModelBase();
-  virtual ~TensorflowMultisourceModelBase() {};
+  virtual ~TensorflowMultisourceModelBase(){};
 
-  virtual std::stringstream GenerateDebugReport(DictType & inputs);
+  virtual std::stringstream
+  GenerateDebugReport(DictType & inputs);
 
-  virtual void RunSession(DictType & inputs, TensorListType & outputs);
+  virtual void
+  RunSession(DictType & inputs, TensorListType & outputs);
 
 private:
-  TensorflowMultisourceModelBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
   // Tensorflow graph and session
-  tensorflow::SavedModelBundle * m_SavedModel;          // The TensorFlow model
+  tensorflow::SavedModelBundle * m_SavedModel; // The TensorFlow model
 
   // Model parameters
-  StringList                 m_InputPlaceholders;       // Input placeholders names
-  SizeListType               m_InputReceptiveFields;    // Input receptive fields
-  StringList                 m_OutputTensors;           // Output tensors names
-  SizeListType               m_OutputExpressionFields;  // Output expression fields
-  DictType                   m_UserPlaceholders;        // User placeholders
-  StringList                 m_TargetNodesNames;        // User nodes target
+  StringList   m_InputPlaceholders;      // Input placeholders names
+  SizeListType m_InputReceptiveFields;   // Input receptive fields
+  StringList   m_OutputTensors;          // Output tensors names
+  SizeListType m_OutputExpressionFields; // Output expression fields
+  DictType     m_UserPlaceholders;       // User placeholders
+  StringList   m_TargetNodesNames;       // User nodes target
 
   // Internal, read-only
-  DataTypeListType           m_InputTensorsDataTypes;   // Input tensors datatype
-  DataTypeListType           m_OutputTensorsDataTypes;  // Output tensors datatype
-  TensorShapeProtoList       m_InputTensorsShapes;      // Input tensors shapes
-  TensorShapeProtoList       m_OutputTensorsShapes;     // Output tensors shapes
+  DataTypeListType     m_InputTensorsDataTypes;  // Input tensors datatype
+  DataTypeListType     m_OutputTensorsDataTypes; // Output tensors datatype
+  TensorShapeProtoList m_InputTensorsShapes;     // Input tensors shapes
+  TensorShapeProtoList m_OutputTensorsShapes;    // Output tensors shapes
 
   // Layer names inside the model corresponding to inputs and outputs
-  StringList m_InputLayers;                             // List of input names, as contained in the model
-  StringList m_OutputLayers;                            // List of output names, as contained in the model
+  StringList m_InputLayers;  // List of input names, as contained in the model
+  StringList m_OutputLayers; // List of output names, as contained in the model
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelBase.hxx b/include/otbTensorflowMultisourceModelBase.hxx
index 752b7c9d..ccefde8f 100644
--- a/include/otbTensorflowMultisourceModelBase.hxx
+++ b/include/otbTensorflowMultisourceModelBase.hxx
@@ -18,28 +18,26 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::TensorflowMultisourceModelBase()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::TensorflowMultisourceModelBase()
 {
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
-  
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+
   m_SavedModel = NULL;
 }
 
 template <class TInputImage, class TOutputImage>
 tensorflow::SignatureDef
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GetSignatureDef()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GetSignatureDef()
 {
-  auto signatures = this->GetSavedModel()->GetSignatures();
+  auto                     signatures = this->GetSavedModel()->GetSignatures();
   tensorflow::SignatureDef signature_def;
 
   if (signatures.size() == 0)
   {
-    itkExceptionMacro("There are no available signatures for this tag-set. \n" <<
-                      "Please check which tag-set to use by running "<<
-                      "`saved_model_cli show --dir your_model_dir --all`");
+    itkExceptionMacro("There are no available signatures for this tag-set. \n"
+                      << "Please check which tag-set to use by running "
+                      << "`saved_model_cli show --dir your_model_dir --all`");
   }
 
   // If serving_default key exists (which is the default for TF saved model), choose it as signature
@@ -57,8 +55,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::PushBackInputTensorBundle(std::string placeholder, SizeType receptiveField, ImagePointerType image)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::PushBackInputTensorBundle(std::string      placeholder,
+                                                                                     SizeType         receptiveField,
+                                                                                     ImagePointerType image)
 {
   Superclass::PushBackInput(image);
   m_InputReceptiveFields.push_back(receptiveField);
@@ -67,8 +66,7 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 std::stringstream
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateDebugReport(DictType & inputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateDebugReport(DictType & inputs)
 {
   // Create a debug report
   std::stringstream debugReport;
@@ -79,18 +77,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   debugReport << "Output image buffered region: " << outputReqRegion << "\n";
 
   // Describe inputs
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
   {
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
-    const RegionType reqRegion = inputPtr->GetRequestedRegion();
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
+    const RegionType       reqRegion = inputPtr->GetRequestedRegion();
     debugReport << "Input #" << i << ":\n";
     debugReport << "Requested region: " << reqRegion << "\n";
     debugReport << "Tensor \"" << inputs[i].first << "\": " << tf::PrintTensorInfos(inputs[i].second) << "\n";
   }
 
   // Show user placeholders
-  debugReport << "User placeholders:\n" ;
-  for (auto& dict: this->GetUserPlaceholders())
+  debugReport << "User placeholders:\n";
+  for (auto & dict : this->GetUserPlaceholders())
   {
     debugReport << "Tensor \"" << dict.first << "\": " << tf::PrintTensorInfos(dict.second) << "\n" << std::endl;
   }
@@ -101,8 +99,7 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::RunSession(DictType & inputs, TensorListType & outputs)
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::RunSession(DictType & inputs, TensorListType & outputs)
 {
 
   // Add the user's placeholders
@@ -111,15 +108,16 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   // Run the TF session here
   // The session will initialize the outputs
 
-  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling TensorFlowModelServe
-  // we must adapt it to `inputs_new`, that corresponds to a mapping {layerName, tensor}, with the layerName being from the model
+  // `inputs` corresponds to a mapping {name, tensor}, with the name being specified by the user when calling
+  // TensorFlowModelServe we must adapt it to `inputs_new`, that corresponds to a mapping {layerName, tensor}, with the
+  // layerName being from the model
   DictType inputs_new;
-  int k = 0;
-  for (auto& dict: inputs)
+  int      k = 0;
+  for (auto & dict : inputs)
   {
-    DictElementType element = {m_InputLayers[k], dict.second};
+    DictElementType element = { m_InputLayers[k], dict.second };
     inputs_new.push_back(element);
-    k+=1;
+    k += 1;
   }
 
   // Run the session, evaluating our output tensors from the graph
@@ -132,16 +130,18 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
     std::stringstream debugReport = GenerateDebugReport(inputs);
 
     // Throw an exception with the report
-    itkExceptionMacro("Can't run the tensorflow session !\n" <<
-                      "Tensorflow error message:\n" << status.ToString() << "\n"
-                      "OTB Filter debug message:\n" << debugReport.str() );
+    itkExceptionMacro("Can't run the tensorflow session !\n"
+                      << "Tensorflow error message:\n"
+                      << status.ToString()
+                      << "\n"
+                         "OTB Filter debug message:\n"
+                      << debugReport.str());
   }
 }
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelBase<TInputImage, TOutputImage>
-::GenerateOutputInformation()
+TensorflowMultisourceModelBase<TInputImage, TOutputImage>::GenerateOutputInformation()
 {
 
   // Check that the number of the following is the same
@@ -151,9 +151,9 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   if (nbInputs != m_InputReceptiveFields.size() || nbInputs != m_InputPlaceholders.size())
   {
-    itkExceptionMacro("Number of input images is " << nbInputs <<
-                      " but the number of input patches size is " << m_InputReceptiveFields.size() <<
-                      " and the number of input tensors names is " << m_InputPlaceholders.size());
+    itkExceptionMacro("Number of input images is "
+                      << nbInputs << " but the number of input patches size is " << m_InputReceptiveFields.size()
+                      << " and the number of input tensors names is " << m_InputPlaceholders.size());
   }
 
   //////////////////////////////////////////////////////////////////////////////////////////
@@ -168,8 +168,10 @@ TensorflowMultisourceModelBase<TInputImage, TOutputImage>
   // this will return m_OutputLayers = ['PartitionedCall:0', 'PartitionedCall:1']
   // In case the user hasn't named the output, e.g.  m_OutputTensors = [''],
   // this will return the first output m_OutputLayers = ['PartitionedCall:0']
-  tf::GetTensorAttributes(signaturedef.inputs(), m_InputPlaceholders, m_InputLayers, m_InputTensorsShapes, m_InputTensorsDataTypes);
-  tf::GetTensorAttributes(signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
+  tf::GetTensorAttributes(
+    signaturedef.inputs(), m_InputPlaceholders, m_InputLayers, m_InputTensorsShapes, m_InputTensorsDataTypes);
+  tf::GetTensorAttributes(
+    signaturedef.outputs(), m_OutputTensors, m_OutputLayers, m_OutputTensorsShapes, m_OutputTensorsDataTypes);
 }
 
 
diff --git a/include/otbTensorflowMultisourceModelFilter.h b/include/otbTensorflowMultisourceModelFilter.h
index 36d781dd..bdf9a02d 100644
--- a/include/otbTensorflowMultisourceModelFilter.h
+++ b/include/otbTensorflowMultisourceModelFilter.h
@@ -80,12 +80,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowMultisourceModelFilter :
-public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowMultisourceModelFilter : public TensorflowMultisourceModelBase<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelFilter                          Self;
   typedef TensorflowMultisourceModelBase<TInputImage, TOutputImage> Superclass;
@@ -99,16 +97,16 @@ public:
   itkTypeMacro(TensorflowMultisourceModelFilter, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType           ImageType;
-  typedef typename Superclass::ImagePointerType    ImagePointerType;
-  typedef typename Superclass::PixelType           PixelType;
-  typedef typename Superclass::IndexType           IndexType;
-  typedef typename IndexType::IndexValueType       IndexValueType;
-  typedef typename Superclass::PointType           PointType;
-  typedef typename Superclass::SizeType            SizeType;
-  typedef typename SizeType::SizeValueType         SizeValueType;
-  typedef typename Superclass::SpacingType         SpacingType;
-  typedef typename Superclass::RegionType          RegionType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::PixelType        PixelType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef typename IndexType::IndexValueType    IndexValueType;
+  typedef typename Superclass::PointType        PointType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename SizeType::SizeValueType      SizeValueType;
+  typedef typename Superclass::SpacingType      SpacingType;
+  typedef typename Superclass::RegionType       RegionType;
 
   typedef TOutputImage                             OutputImageType;
   typedef typename TOutputImage::PixelType         OutputPixelType;
@@ -119,12 +117,12 @@ public:
   typedef typename itk::ImageRegionConstIterator<TInputImage>              InputConstIteratorType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictElementType     DictElementType;
-  typedef typename Superclass::DictType            DictType;
-  typedef typename Superclass::StringList          StringList;
-  typedef typename Superclass::SizeListType        SizeListType;
-  typedef typename Superclass::TensorListType      TensorListType;
-  typedef std::vector<float>                       ScaleListType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
+  typedef std::vector<float>                   ScaleListType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
@@ -137,34 +135,43 @@ public:
 
 protected:
   TensorflowMultisourceModelFilter();
-  virtual ~TensorflowMultisourceModelFilter() {};
+  virtual ~TensorflowMultisourceModelFilter(){};
 
-  virtual void SmartPad(RegionType& region, const SizeType &patchSize);
-  virtual void SmartShrink(RegionType& region, const SizeType &patchSize);
-  virtual void ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize);
-  virtual bool OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage);
-  virtual void EnlargeToAlignedRegion(RegionType& region);
+  virtual void
+  SmartPad(RegionType & region, const SizeType & patchSize);
+  virtual void
+  SmartShrink(RegionType & region, const SizeType & patchSize);
+  virtual void
+  ImageToExtent(ImageType * image, PointType & extentInf, PointType & extentSup, SizeType & patchSize);
+  virtual bool
+  OutputRegionToInputRegion(const RegionType & outputRegion, RegionType & inputRegion, ImageType *& inputImage);
+  virtual void
+  EnlargeToAlignedRegion(RegionType & region);
 
-  virtual void GenerateOutputInformation(void);
+  virtual void
+  GenerateOutputInformation(void);
 
-  virtual void GenerateInputRequestedRegion(void);
+  virtual void
+  GenerateInputRequestedRegion(void);
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowMultisourceModelFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
-  bool                       m_ForceOutputGridSize;  // Force output grid size
-  bool                       m_FullyConvolutional;   // Convolution mode
-  float                      m_OutputSpacingScale;   // scaling of the output spacings
+  SizeType m_OutputGridSize;      // Output grid size
+  bool     m_ForceOutputGridSize; // Force output grid size
+  bool     m_FullyConvolutional;  // Convolution mode
+  float    m_OutputSpacingScale;  // scaling of the output spacings
 
   // Internal
-  SpacingType                m_OutputSpacing;     // Output image spacing
-  PointType                  m_OutputOrigin;      // Output image origin
-  SizeType                   m_OutputSize;        // Output image size
-  PixelType                  m_NullPixel;         // Pixel filled with zeros
+  SpacingType m_OutputSpacing; // Output image spacing
+  PointType   m_OutputOrigin;  // Output image origin
+  SizeType    m_OutputSize;    // Output image size
+  PixelType   m_NullPixel;     // Pixel filled with zeros
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelFilter.hxx b/include/otbTensorflowMultisourceModelFilter.hxx
index d208f01a..3cbb53d9 100644
--- a/include/otbTensorflowMultisourceModelFilter.hxx
+++ b/include/otbTensorflowMultisourceModelFilter.hxx
@@ -18,9 +18,8 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::TensorflowMultisourceModelFilter()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::TensorflowMultisourceModelFilter()
+{
   m_OutputGridSize.Fill(0);
   m_ForceOutputGridSize = false;
   m_FullyConvolutional = false;
@@ -31,38 +30,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   m_OutputSpacingScale = 1.0f;
 
-  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max() );
-  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max() );
- }
+  Superclass::SetCoordinateTolerance(itk::NumericTraits<double>::max());
+  Superclass::SetDirectionTolerance(itk::NumericTraits<double>::max());
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartPad(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartPad(RegionType & region, const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType rval = 0.5 * psz;
     const SizeValueType lval = psz - rval;
     region.GetModifiableIndex()[dim] -= lval;
     region.GetModifiableSize()[dim] += psz;
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::SmartShrink(RegionType& region, const SizeType &patchSize)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::SmartShrink(RegionType &     region,
+                                                                         const SizeType & patchSize)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     const SizeValueType psz = patchSize[dim];
     const SizeValueType lval = 0.5 * psz;
     region.GetModifiableIndex()[dim] += lval;
     region.GetModifiableSize()[dim] -= psz - 1;
-    }
- }
+  }
+}
 
 /**
   Compute the input image extent: corners inf and sup.
@@ -70,9 +68,11 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::ImageToExtent(ImageType* image, PointType &extentInf, PointType &extentSup, SizeType &patchSize)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::ImageToExtent(ImageType * image,
+                                                                           PointType & extentInf,
+                                                                           PointType & extentSup,
+                                                                           SizeType &  patchSize)
+{
 
   // Get largest possible region
   RegionType largestPossibleRegion = image->GetLargestPossibleRegion();
@@ -89,13 +89,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType imageEnd;
   image->TransformIndexToPhysicalPoint(imageLastIndex, imageEnd);
   image->TransformIndexToPhysicalPoint(imageFirstIndex, imageOrigin);
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     extentInf[dim] = vnl_math_min(imageOrigin[dim], imageEnd[dim]) - 0.5 * image->GetSpacing()[dim];
     extentSup[dim] = vnl_math_max(imageOrigin[dim], imageEnd[dim]) + 0.5 * image->GetSpacing()[dim];
-    }
-
- }
+  }
+}
 
 /**
   Compute the region of the input image which correspond to the given output requested region
@@ -104,9 +103,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 bool
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::OutputRegionToInputRegion(const RegionType &outputRegion, RegionType &inputRegion, ImageType* &inputImage)
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::OutputRegionToInputRegion(const RegionType & outputRegion,
+                                                                                       RegionType &       inputRegion,
+                                                                                       ImageType *&       inputImage)
+{
 
   // Mosaic Region Start & End (mosaic image index)
   const IndexType outIndexStart = outputRegion.GetIndex();
@@ -115,45 +115,43 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // Mosaic Region Start & End (geo)
   PointType outPointStart, outPointEnd;
   this->GetOutput()->TransformIndexToPhysicalPoint(outIndexStart, outPointStart);
-  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd  , outPointEnd  );
+  this->GetOutput()->TransformIndexToPhysicalPoint(outIndexEnd, outPointEnd);
 
   // Add the half-width pixel size of the input image
   // and remove the half-width pixel size of the output image
   // (coordinates = pixel center)
   const SpacingType outputSpc = this->GetOutput()->GetSpacing();
   const SpacingType inputSpc = inputImage->GetSpacing();
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
-    const typename SpacingType::ValueType border =
-        0.5 * (inputSpc[dim] - outputSpc[dim]);
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
+    const typename SpacingType::ValueType border = 0.5 * (inputSpc[dim] - outputSpc[dim]);
     if (outPointStart[dim] < outPointEnd[dim])
-      {
+    {
       outPointStart[dim] += border;
-      outPointEnd  [dim] -= border;
-      }
+      outPointEnd[dim] -= border;
+    }
     else
-      {
+    {
       outPointStart[dim] -= border;
-      outPointEnd  [dim] += border;
-      }
+      outPointEnd[dim] += border;
     }
+  }
 
   // Mosaic Region Start & End (input image index)
   IndexType defIndexStart, defIndexEnd;
   inputImage->TransformPhysicalPointToIndex(outPointStart, defIndexStart);
-  inputImage->TransformPhysicalPointToIndex(outPointEnd  , defIndexEnd);
+  inputImage->TransformPhysicalPointToIndex(outPointEnd, defIndexEnd);
 
   // Compute input image region
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     inputRegion.SetIndex(dim, vnl_math_min(defIndexStart[dim], defIndexEnd[dim]));
     inputRegion.SetSize(dim, vnl_math_max(defIndexStart[dim], defIndexEnd[dim]) - inputRegion.GetIndex(dim) + 1);
-    }
+  }
 
   // crop the input requested region at the input's largest possible region
-  return inputRegion.Crop( inputImage->GetLargestPossibleRegion() );
-
- }
+  return inputRegion.Crop(inputImage->GetLargestPossibleRegion());
+}
 
 /*
  * Enlarge the given region to the nearest aligned region.
@@ -161,11 +159,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::EnlargeToAlignedRegion(RegionType& region)
- {
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::EnlargeToAlignedRegion(RegionType & region)
+{
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = region.GetIndex(dim);
     IndexValueType upper = lower + region.GetSize(dim);
@@ -177,22 +174,20 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
- }
+  }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateOutputInformation()
+{
 
   Superclass::GenerateOutputInformation();
 
@@ -204,8 +199,8 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   // OTBTF assumes that the output image has the following geometric properties:
   // (1) Image origin is the top-left pixel
   // (2) Image pixel spacing has positive x-spacing and negative y-spacing
-  m_OutputSpacing = this->GetInput(0)->GetSpacing();  // GetSpacing() returns abs. spacing
-  m_OutputSpacing[1] *= -1.0;  // Force negative y-spacing
+  m_OutputSpacing = this->GetInput(0)->GetSpacing(); // GetSpacing() returns abs. spacing
+  m_OutputSpacing[1] *= -1.0;                        // Force negative y-spacing
   m_OutputSpacing[0] *= m_OutputSpacingScale;
   m_OutputSpacing[1] *= m_OutputSpacingScale;
 
@@ -214,30 +209,32 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   PointType extentInf, extentSup;
   extentSup.Fill(itk::NumericTraits<double>::max());
   extentInf.Fill(itk::NumericTraits<double>::NonpositiveMin());
-  for (unsigned int imageIndex = 0 ; imageIndex < this->GetNumberOfInputs() ; imageIndex++)
-    {
-    ImageType * currentImage = static_cast<ImageType *>(
-        Superclass::ProcessObject::GetInput(imageIndex) );
+  for (unsigned int imageIndex = 0; imageIndex < this->GetNumberOfInputs(); imageIndex++)
+  {
+    ImageType * currentImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(imageIndex));
 
     // Update output image extent
     PointType currentInputImageExtentInf, currentInputImageExtentSup;
-    ImageToExtent(currentImage, currentInputImageExtentInf, currentInputImageExtentSup, this->GetInputReceptiveFields()[imageIndex]);
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
+    ImageToExtent(currentImage,
+                  currentInputImageExtentInf,
+                  currentInputImageExtentSup,
+                  this->GetInputReceptiveFields()[imageIndex]);
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
       extentInf[dim] = vnl_math_max(currentInputImageExtentInf[dim], extentInf[dim]);
       extentSup[dim] = vnl_math_min(currentInputImageExtentSup[dim], extentSup[dim]);
-      }
     }
+  }
 
 
   // Set final origin, aligned to the reference image grid.
   // Here we simply get back to the center of the pixel (extents are pixels corners coordinates)
-  m_OutputOrigin[0] =  extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
-  m_OutputOrigin[1] =  extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
+  m_OutputOrigin[0] = extentInf[0] + 0.5 * this->GetInput(0)->GetSpacing()[0];
+  m_OutputOrigin[1] = extentSup[1] - 0.5 * this->GetInput(0)->GetSpacing()[1];
 
   // Set final size
-  m_OutputSize[0] = std::floor( (extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]) );
-  m_OutputSize[1] = std::floor( (extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]) );
+  m_OutputSize[0] = std::floor((extentSup[0] - extentInf[0]) / std::abs(m_OutputSpacing[0]));
+  m_OutputSize[1] = std::floor((extentSup[1] - extentInf[1]) / std::abs(m_OutputSpacing[1]));
 
   // We should take in account one more thing: the expression field. It enlarge slightly the output image extent.
   m_OutputOrigin[0] -= m_OutputSpacing[0] * std::floor(0.5 * this->GetOutputExpressionFields().at(0)[0]);
@@ -247,18 +244,18 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Set output grid size
   if (!m_ForceOutputGridSize)
-    {
+  {
     // Default is the output field of expression
     m_OutputGridSize = this->GetOutputExpressionFields().at(0);
-    }
+  }
 
   // Resize the largestPossibleRegion to be a multiple of the grid size
-  for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+  {
     if (m_OutputGridSize[dim] > m_OutputSize[dim])
       itkGenericExceptionMacro("Output grid size is larger than output image size !");
     m_OutputSize[dim] -= m_OutputSize[dim] % m_OutputGridSize[dim];
-    }
+  }
 
   // Set the largest possible region
   RegionType largestPossibleRegion;
@@ -269,38 +266,39 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   //////////////////////////////////////////////////////////////////////////////////////////
 
   unsigned int outputPixelSize = 0;
-  for (auto& protoShape: this->GetOutputTensorsShapes())
-    {
+  for (auto & protoShape : this->GetOutputTensorsShapes())
+  {
     // Find the number of components
     if (protoShape.dim_size() > 4)
-      {
-      itkExceptionMacro("dim_size=" << protoShape.dim_size() << " currently not supported. "
-          "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
-          "In the case of 1-dimensional tensor, the first dimension is for the batch, "
-          "and we assume that the output tensor has 1 channel. "
-          "In the case of 2-dimensional tensor, the first dimension is for the batch, "
-          "and the second is the number of components. "
-          "In the case of 3-dimensional tensor, the first dimension is for the batch, "
-          "and other dims are for (x, y). "
-          "In the case of 4-dimensional tensor, the first dimension is for the batch, "
-          "and the second and the third are for (x, y). The last is for the number of "
-          "channels. ");
-      }
+    {
+      itkExceptionMacro("dim_size=" << protoShape.dim_size()
+                                    << " currently not supported. "
+                                       "Keep in mind that output tensors must have 1, 2, 3 or 4 dimensions. "
+                                       "In the case of 1-dimensional tensor, the first dimension is for the batch, "
+                                       "and we assume that the output tensor has 1 channel. "
+                                       "In the case of 2-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second is the number of components. "
+                                       "In the case of 3-dimensional tensor, the first dimension is for the batch, "
+                                       "and other dims are for (x, y). "
+                                       "In the case of 4-dimensional tensor, the first dimension is for the batch, "
+                                       "and the second and the third are for (x, y). The last is for the number of "
+                                       "channels. ");
+    }
     unsigned int nComponents = tf::GetNumberOfChannelsFromShapeProto(protoShape);
     outputPixelSize += nComponents;
-    }
+  }
 
   // Copy input image projection
-  ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(0) );
+  ImageType *       inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   const std::string projectionRef = inputImage->GetProjectionRef();
 
   // Set output image origin/spacing/size/projection
   ImageType * outputPtr = this->GetOutput();
   outputPtr->SetNumberOfComponentsPerPixel(outputPixelSize);
-  outputPtr->SetProjectionRef        ( projectionRef );
-  outputPtr->SetOrigin               ( m_OutputOrigin );
-  outputPtr->SetSignedSpacing        ( m_OutputSpacing );
-  outputPtr->SetLargestPossibleRegion( largestPossibleRegion );
+  outputPtr->SetProjectionRef(projectionRef);
+  outputPtr->SetOrigin(m_OutputOrigin);
+  outputPtr->SetSignedSpacing(m_OutputSpacing);
+  outputPtr->SetLargestPossibleRegion(largestPossibleRegion);
 
   // Set null pixel
   m_NullPixel.SetSize(outputPtr->GetNumberOfComponentsPerPixel());
@@ -312,14 +310,12 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintX, m_OutputGridSize[0]);
   itk::EncapsulateMetaData(outputPtr->GetMetaDataDictionary(), MetaDataKey::TileHintY, m_OutputGridSize[1]);
-
- }
+}
 
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // Output requested region
@@ -329,35 +325,37 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
   EnlargeToAlignedRegion(requestedRegion);
 
   // For each image, get the requested region
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // Compute the requested region
     RegionType inRegion;
-    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage) )
-      {
+    if (!OutputRegionToInputRegion(requestedRegion, inRegion, inputImage))
+    {
       // Image does not overlap requested region: set requested region to null
-      otbLogMacro(Debug,  << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
+      otbLogMacro(Debug, << "Image #" << i << " :\n" << inRegion << " is outside the requested region");
       inRegion.GetModifiableIndex().Fill(0);
       inRegion.GetModifiableSize().Fill(0);
-      }
+    }
 
     // Compute the FOV-scale*FOE radius to pad
     SizeType toPad(this->GetInputReceptiveFields().at(i));
-    for(unsigned int dim = 0; dim<ImageType::ImageDimension; ++dim)
-      {
-      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale * this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim] ;
+    for (unsigned int dim = 0; dim < ImageType::ImageDimension; ++dim)
+    {
+      int valToPad = 1 + (this->GetOutputExpressionFields().at(0)[dim] - 1) * m_OutputSpacingScale *
+                           this->GetInput(0)->GetSpacing()[dim] / this->GetInput(i)->GetSpacing()[dim];
       if (valToPad > toPad[dim])
-        itkExceptionMacro("The input requested region of source #" << i << " is not consistent (dim "<< dim<< ")." <<
-                          "Please check RF, EF, SF vs physical spacing of your image!" <<
-                          "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim] <<
-                          "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim] <<
-                          "\nScale factor: " << m_OutputSpacingScale <<
-                          "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] <<
-                          "\nImage " << i << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
+        itkExceptionMacro("The input requested region of source #"
+                          << i << " is not consistent (dim " << dim << ")."
+                          << "Please check RF, EF, SF vs physical spacing of your image!"
+                          << "\nReceptive field: " << this->GetInputReceptiveFields().at(i)[dim]
+                          << "\nExpression field: " << this->GetOutputExpressionFields().at(0)[dim]
+                          << "\nScale factor: " << m_OutputSpacingScale
+                          << "\nReference image spacing: " << this->GetInput(0)->GetSpacing()[dim] << "\nImage " << i
+                          << " spacing: " << this->GetInput(i)->GetSpacing()[dim]);
       toPad[dim] -= valToPad;
-      }
+    }
 
     // Pad with radius
     SmartPad(inRegion, toPad);
@@ -368,30 +366,28 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     // can be one pixel larger when the input image regions are not physically
     // aligned.
     if (!m_FullyConvolutional)
-      {
+    {
       inRegion.PadByRadius(1);
-      }
+    }
 
     inRegion.Crop(inputImage->GetLargestPossibleRegion());
 
     // Update the requested region
     inputImage->SetRequestedRegion(inRegion);
 
-    } // next image
-
- }
+  } // next image
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   typename TOutputImage::Pointer outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType               outputReqRegion = outputPtr->GetRequestedRegion();
 
   // Get the aligned output requested region
   RegionType outputAlignedReqRegion(outputReqRegion);
@@ -404,10 +400,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < nInputs ; i++)
-    {
+  for (unsigned int i = 0; i < nInputs; i++)
+  {
     // Input image pointer
-    const ImagePointerType inputPtr = const_cast<TInputImage*>(this->GetInput(i));
+    const ImagePointerType inputPtr = const_cast<TInputImage *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
@@ -416,13 +412,13 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
     const RegionType reqRegion = inputPtr->GetRequestedRegion();
 
     if (m_FullyConvolutional)
-      {
+    {
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = 1;
-      tensorflow::int64 sz_y = reqRegion.GetSize(1);
-      tensorflow::int64 sz_x = reqRegion.GetSize(0);
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = 1;
+      tensorflow::int64       sz_y = reqRegion.GetSize(1);
+      tensorflow::int64       sz_x = reqRegion.GetSize(0);
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -433,16 +429,16 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Input is the tensor representing the subset of image
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
-      }
+    }
     else
-      {
+    {
       // Preparing patches
       // Shape of input tensor #i
-      tensorflow::int64 sz_n = outputReqRegion.GetNumberOfPixels();
-      tensorflow::int64 sz_y = inputPatchSize[1];
-      tensorflow::int64 sz_x = inputPatchSize[0];
-      tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-      tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
+      tensorflow::int64       sz_n = outputReqRegion.GetNumberOfPixels();
+      tensorflow::int64       sz_y = inputPatchSize[1];
+      tensorflow::int64       sz_x = inputPatchSize[0];
+      tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+      tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
 
       // Create the input tensor
       tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
@@ -450,10 +446,10 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
       // Fill the input tensor.
       // We iterate over points which are located from the index iterator
       // moving through the output image requested region
-      unsigned int elemIndex = 0;
+      unsigned int      elemIndex = 0;
       IndexIteratorType idxIt(outputPtr, outputReqRegion);
       for (idxIt.GoToBegin(); !idxIt.IsAtEnd(); ++idxIt)
-        {
+      {
         // Get the coordinates of the current output pixel
         PointType point;
         outputPtr->TransformIndexToPhysicalPoint(idxIt.GetIndex(), point);
@@ -461,15 +457,15 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
         // Sample the i-th input patch centered on the point
         tf::SampleCenteredPatch<TInputImage>(inputPtr, point, inputPatchSize, inputTensor, elemIndex);
         elemIndex++;
-        }
+      }
 
       // Input is the tensor of patches (aka the batch)
       DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
       inputs.push_back(input);
 
-      } // mode is not full convolutional
+    } // mode is not full convolutional
 
-    } // next input tensor
+  } // next input tensor
 
   // Run session
   // TODO: see if we print some info about inputs/outputs of the model e.g. m_OutputTensors
@@ -483,26 +479,25 @@ TensorflowMultisourceModelFilter<TInputImage, TOutputImage>
 
   // Get output tensors
   int bandOffset = 0;
-  for (unsigned int i = 0 ; i < outputs.size() ; i++)
-    {
+  for (unsigned int i = 0; i < outputs.size(); i++)
+  {
     // The offset (i.e. the starting index of the channel for the output tensor) is updated
     // during this call
-    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see tf::CopyTensorToImageRegion)
+    // TODO: implement a generic strategy enabling expression field copy in patch-based mode (see
+    // tf::CopyTensorToImageRegion)
     try
-      {
-      tf::CopyTensorToImageRegion<TOutputImage> (outputs[i],
-          outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
-      }
-    catch( itk::ExceptionObject & err )
-      {
+    {
+      tf::CopyTensorToImageRegion<TOutputImage>(
+        outputs[i], outputAlignedReqRegion, outputPtr, outputReqRegion, bandOffset);
+    }
+    catch (itk::ExceptionObject & err)
+    {
       std::stringstream debugMsg = this->GenerateDebugReport(inputs);
       itkExceptionMacro("Error occurred during tensor to image conversion.\n"
-          << "Context: " << debugMsg.str()
-          << "Error:" << err);
-      }
+                        << "Context: " << debugMsg.str() << "Error:" << err);
     }
-
- }
+  }
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelLearningBase.h b/include/otbTensorflowMultisourceModelLearningBase.h
index 0663f17a..6e01317d 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.h
+++ b/include/otbTensorflowMultisourceModelLearningBase.h
@@ -53,37 +53,35 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelLearningBase :
-public TensorflowMultisourceModelBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelLearningBase : public TensorflowMultisourceModelBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowMultisourceModelLearningBase       Self;
-  typedef TensorflowMultisourceModelBase<TInputImage>  Superclass;
-  typedef itk::SmartPointer<Self>                      Pointer;
-  typedef itk::SmartPointer<const Self>                ConstPointer;
+  typedef TensorflowMultisourceModelLearningBase      Self;
+  typedef TensorflowMultisourceModelBase<TInputImage> Superclass;
+  typedef itk::SmartPointer<Self>                     Pointer;
+  typedef itk::SmartPointer<const Self>               ConstPointer;
 
   /** Run-time type information (and related methods). */
   itkTypeMacro(TensorflowMultisourceModelLearningBase, TensorflowMultisourceModelBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::DictElementType   DictElementType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
+  typedef typename Superclass::DictType        DictType;
+  typedef typename Superclass::DictElementType DictElementType;
+  typedef typename Superclass::StringList      StringList;
+  typedef typename Superclass::SizeListType    SizeListType;
+  typedef typename Superclass::TensorListType  TensorListType;
 
   /* Typedefs for index */
-  typedef typename ImageType::IndexValueType     IndexValueType;
-  typedef std::vector<IndexValueType>            IndexListType;
+  typedef typename ImageType::IndexValueType IndexValueType;
+  typedef std::vector<IndexValueType>        IndexListType;
 
   // Batch size
   itkSetMacro(BatchSize, IndexValueType);
@@ -98,29 +96,36 @@ public:
 
 protected:
   TensorflowMultisourceModelLearningBase();
-  virtual ~TensorflowMultisourceModelLearningBase() {};
+  virtual ~TensorflowMultisourceModelLearningBase(){};
 
-  virtual void GenerateOutputInformation(void) override;
+  virtual void
+  GenerateOutputInformation(void) override;
 
-  virtual void GenerateInputRequestedRegion();
+  virtual void
+  GenerateInputRequestedRegion();
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
-  virtual void PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize, const IndexListType & order);
+  virtual void
+  PopulateInputTensors(DictType &             inputs,
+                       const IndexValueType & sampleStart,
+                       const IndexValueType & batchSize,
+                       const IndexListType &  order);
 
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize) = 0;
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize) = 0;
 
 private:
-  TensorflowMultisourceModelLearningBase(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelLearningBase(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  unsigned int          m_BatchSize;       // Batch size
-  bool                  m_UseStreaming;    // Use streaming on/off
+  unsigned int m_BatchSize;    // Batch size
+  bool         m_UseStreaming; // Use streaming on/off
 
   // Read only
-  IndexValueType        m_NumberOfSamples; // Number of samples
+  IndexValueType m_NumberOfSamples; // Number of samples
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelLearningBase.hxx b/include/otbTensorflowMultisourceModelLearningBase.hxx
index 28b2328b..bfa26d4d 100644
--- a/include/otbTensorflowMultisourceModelLearningBase.hxx
+++ b/include/otbTensorflowMultisourceModelLearningBase.hxx
@@ -18,39 +18,38 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelLearningBase<TInputImage>
-::TensorflowMultisourceModelLearningBase(): m_BatchSize(100),
-m_UseStreaming(false), m_NumberOfSamples(0)
- {
- }
+TensorflowMultisourceModelLearningBase<TInputImage>::TensorflowMultisourceModelLearningBase()
+  : m_BatchSize(100)
+  , m_UseStreaming(false)
+  , m_NumberOfSamples(0)
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Set an empty output buffered region
   ImageType * outputPtr = this->GetOutput();
-  RegionType nullRegion;
+  RegionType  nullRegion;
   nullRegion.GetModifiableSize().Fill(1);
   outputPtr->SetNumberOfComponentsPerPixel(1);
-  outputPtr->SetLargestPossibleRegion( nullRegion );
+  outputPtr->SetLargestPossibleRegion(nullRegion);
 
   // Count the number of samples
   m_NumberOfSamples = 0;
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Make sure input is available
-    if ( inputPtr.IsNull() )
-      {
+    if (inputPtr.IsNull())
+    {
       itkExceptionMacro(<< "Input " << i << " is null!");
-      }
+    }
 
     // Update input information
     inputPtr->UpdateOutputInformation();
@@ -63,67 +62,62 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
     // Check size X
     if (inputPatchSize[0] != reqRegion.GetSize(0))
-      itkExceptionMacro("Patch size for input " << i
-          << " is " << inputPatchSize
-          << " but input patches image size is " << reqRegion.GetSize());
+      itkExceptionMacro("Patch size for input " << i << " is " << inputPatchSize << " but input patches image size is "
+                                                << reqRegion.GetSize());
 
     // Check size Y
     if (reqRegion.GetSize(1) % inputPatchSize[1] != 0)
       itkExceptionMacro("Input patches image must have a number of rows which is "
-          << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
-          << " rows but patch size Y is " <<  inputPatchSize[1] << " for input " << i);
+                        << "a multiple of the patch size Y! Patches image has " << reqRegion.GetSize(1)
+                        << " rows but patch size Y is " << inputPatchSize[1] << " for input " << i);
 
     // Get the batch size
     const IndexValueType currNumberOfSamples = reqRegion.GetSize(1) / inputPatchSize[1];
 
     // Check the consistency with other inputs
     if (m_NumberOfSamples == 0)
-      {
+    {
       m_NumberOfSamples = currNumberOfSamples;
-      }
+    }
     else if (m_NumberOfSamples != currNumberOfSamples)
-      {
-      itkGenericExceptionMacro("Batch size of input " << (i-1)
-          << " was " << m_NumberOfSamples
-          << " but input " << i
-          << " has a batch size of " << currNumberOfSamples );
-      }
-    } // next input
- }
+    {
+      itkGenericExceptionMacro("Batch size of input " << (i - 1) << " was " << m_NumberOfSamples << " but input " << i
+                                                      << " has a batch size of " << currNumberOfSamples);
+    }
+  } // next input
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateInputRequestedRegion()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateInputRequestedRegion()
+{
   Superclass::GenerateInputRequestedRegion();
 
   // For each image, set the requested region
   RegionType nullRegion;
-  for(unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
-    {
-    ImageType * inputImage = static_cast<ImageType * >( Superclass::ProcessObject::GetInput(i) );
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); ++i)
+  {
+    ImageType * inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(i));
 
     // If the streaming is enabled, we don't read the full image
     if (m_UseStreaming)
-      {
+    {
       inputImage->SetRequestedRegion(nullRegion);
-      }
+    }
     else
-      {
+    {
       inputImage->SetRequestedRegion(inputImage->GetLargestPossibleRegion());
-      }
-    } // next image
- }
+    }
+  } // next image
+}
 
 /**
  *
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::GenerateData()
+{
 
   // Batches loop
   const IndexValueType nBatches = std::ceil(m_NumberOfSamples / m_BatchSize);
@@ -131,15 +125,15 @@ TensorflowMultisourceModelLearningBase<TInputImage>
 
   itk::ProgressReporter progress(this, 0, nBatches);
 
-  for (IndexValueType batch = 0 ; batch < nBatches ; batch++)
-    {
+  for (IndexValueType batch = 0; batch < nBatches; batch++)
+  {
 
     // Feed dict
     DictType inputs;
 
     // Batch start and size
     const IndexValueType sampleStart = batch * m_BatchSize;
-    IndexValueType batchSize = m_BatchSize;
+    IndexValueType       batchSize = m_BatchSize;
     if (rest != 0 && batch == nBatches - 1)
     {
       batchSize = rest;
@@ -149,40 +143,40 @@ TensorflowMultisourceModelLearningBase<TInputImage>
     this->ProcessBatch(inputs, sampleStart, batchSize);
 
     progress.CompletedPixel();
-    } // Next batch
-
- }
+  } // Next batch
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelLearningBase<TInputImage>
-::PopulateInputTensors(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize, const IndexListType & order)
- {
+TensorflowMultisourceModelLearningBase<TInputImage>::PopulateInputTensors(DictType &             inputs,
+                                                                          const IndexValueType & sampleStart,
+                                                                          const IndexValueType & batchSize,
+                                                                          const IndexListType &  order)
+{
   const bool reorder = order.size();
 
   // Populate input tensors
-  for (unsigned int i = 0 ; i < this->GetNumberOfInputs() ; i++)
-    {
+  for (unsigned int i = 0; i < this->GetNumberOfInputs(); i++)
+  {
     // Input image pointer
-    ImagePointerType inputPtr = const_cast<ImageType*>(this->GetInput(i));
+    ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
 
     // Patch size of tensor #i
     const SizeType inputPatchSize = this->GetInputReceptiveFields().at(i);
 
     // Create the tensor for the batch
-    const tensorflow::int64 sz_n = batchSize;
-    const tensorflow::int64 sz_y = inputPatchSize[1];
-    const tensorflow::int64 sz_x = inputPatchSize[0];
-    const tensorflow::int64 sz_c = inputPtr->GetNumberOfComponentsPerPixel();
-    const tensorflow::TensorShape inputTensorShape({sz_n, sz_y, sz_x, sz_c});
-    tensorflow::Tensor inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
+    const tensorflow::int64       sz_n = batchSize;
+    const tensorflow::int64       sz_y = inputPatchSize[1];
+    const tensorflow::int64       sz_x = inputPatchSize[0];
+    const tensorflow::int64       sz_c = inputPtr->GetNumberOfComponentsPerPixel();
+    const tensorflow::TensorShape inputTensorShape({ sz_n, sz_y, sz_x, sz_c });
+    tensorflow::Tensor            inputTensor(this->GetInputTensorsDataTypes()[i], inputTensorShape);
 
     // Populate the tensor
-    for (IndexValueType elem = 0 ; elem < batchSize ; elem++)
-      {
+    for (IndexValueType elem = 0; elem < batchSize; elem++)
+    {
       const tensorflow::uint64 samplePos = sampleStart + elem;
-      IndexType start;
+      IndexType                start;
       start[0] = 0;
       if (reorder)
       {
@@ -190,7 +184,8 @@ TensorflowMultisourceModelLearningBase<TInputImage>
       }
       else
       {
-        start[1] = samplePos * sz_y;;
+        start[1] = samplePos * sz_y;
+
       }
       RegionType patchRegion(start, inputPatchSize);
       if (m_UseStreaming)
@@ -198,14 +193,14 @@ TensorflowMultisourceModelLearningBase<TInputImage>
         // If streaming is enabled, we need to explicitly propagate requested region
         tf::PropagateRequestedRegion<TInputImage>(inputPtr, patchRegion);
       }
-      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem );
-      }
+      tf::RecopyImageRegionToTensorWithCast<TInputImage>(inputPtr, patchRegion, inputTensor, elem);
+    }
 
     // Input #i : the tensor of patches (aka the batch)
     DictElementType input = { this->GetInputPlaceholders()[i], inputTensor };
     inputs.push_back(input);
-    } // next input tensor
- }
+  } // next input tensor
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelTrain.h b/include/otbTensorflowMultisourceModelTrain.h
index 8ec4c38c..694f09e0 100644
--- a/include/otbTensorflowMultisourceModelTrain.h
+++ b/include/otbTensorflowMultisourceModelTrain.h
@@ -34,11 +34,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelTrain :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelTrain : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelTrain                     Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -52,25 +50,27 @@ public:
   itkTypeMacro(TensorflowMultisourceModelTrain, TensorflowMultisourceModelLearningBase);
 
   /** Superclass typedefs */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
 
 protected:
   TensorflowMultisourceModelTrain();
-  virtual ~TensorflowMultisourceModelTrain() {};
+  virtual ~TensorflowMultisourceModelTrain(){};
 
-  virtual void GenerateData();
-  virtual void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  virtual void
+  GenerateData();
+  virtual void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelTrain(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelTrain(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  IndexListType     m_RandomIndices;           // Reordered indices
+  IndexListType m_RandomIndices; // Reordered indices
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelTrain.hxx b/include/otbTensorflowMultisourceModelTrain.hxx
index 272dd639..46bc2d7b 100644
--- a/include/otbTensorflowMultisourceModelTrain.hxx
+++ b/include/otbTensorflowMultisourceModelTrain.hxx
@@ -18,37 +18,33 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelTrain<TInputImage>
-::TensorflowMultisourceModelTrain()
- {
- }
+TensorflowMultisourceModelTrain<TInputImage>::TensorflowMultisourceModelTrain()
+{}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelTrain<TInputImage>::GenerateData()
+{
 
   // Initial sequence 1...N
   m_RandomIndices.resize(this->GetNumberOfSamples());
-  std::iota (std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
+  std::iota(std::begin(m_RandomIndices), std::end(m_RandomIndices), 0);
 
   // Shuffle the sequence
   std::random_device rd;
-  std::mt19937 g(rd());
+  std::mt19937       g(rd());
   std::shuffle(m_RandomIndices.begin(), m_RandomIndices.end(), g);
 
   // Call the generic method
   Superclass::GenerateData();
-
- }
+}
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelTrain<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelTrain<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                           const IndexValueType & sampleStart,
+                                                           const IndexValueType & batchSize)
+{
   // Populate input tensors
   this->PopulateInputTensors(inputs, sampleStart, batchSize, m_RandomIndices);
 
@@ -57,12 +53,11 @@ TensorflowMultisourceModelTrain<TInputImage>
   this->RunSession(inputs, outputs);
 
   // Display outputs tensors
-  for (auto& o: outputs)
+  for (auto & o : outputs)
   {
     tf::PrintTensorInfos(o);
   }
-
- }
+}
 
 
 } // end namespace otb
diff --git a/include/otbTensorflowMultisourceModelValidate.h b/include/otbTensorflowMultisourceModelValidate.h
index 322f6a24..54691747 100644
--- a/include/otbTensorflowMultisourceModelValidate.h
+++ b/include/otbTensorflowMultisourceModelValidate.h
@@ -42,11 +42,9 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage>
-class ITK_EXPORT TensorflowMultisourceModelValidate :
-public TensorflowMultisourceModelLearningBase<TInputImage>
+class ITK_EXPORT TensorflowMultisourceModelValidate : public TensorflowMultisourceModelLearningBase<TInputImage>
 {
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowMultisourceModelValidate                  Self;
   typedef TensorflowMultisourceModelLearningBase<TInputImage> Superclass;
@@ -60,20 +58,20 @@ public:
   itkTypeMacro(TensorflowMultisourceModelValidate, TensorflowMultisourceModelLearningBase);
 
   /** Images typedefs */
-  typedef typename Superclass::ImageType         ImageType;
-  typedef typename Superclass::ImagePointerType  ImagePointerType;
-  typedef typename Superclass::RegionType        RegionType;
-  typedef typename Superclass::SizeType          SizeType;
-  typedef typename Superclass::IndexType         IndexType;
-  typedef std::vector<ImagePointerType>          ImageListType;
+  typedef typename Superclass::ImageType        ImageType;
+  typedef typename Superclass::ImagePointerType ImagePointerType;
+  typedef typename Superclass::RegionType       RegionType;
+  typedef typename Superclass::SizeType         SizeType;
+  typedef typename Superclass::IndexType        IndexType;
+  typedef std::vector<ImagePointerType>         ImageListType;
 
   /* Typedefs for parameters */
-  typedef typename Superclass::DictType          DictType;
-  typedef typename Superclass::StringList        StringList;
-  typedef typename Superclass::SizeListType      SizeListType;
-  typedef typename Superclass::TensorListType    TensorListType;
-  typedef typename Superclass::IndexValueType    IndexValueType;
-  typedef typename Superclass::IndexListType     IndexListType;
+  typedef typename Superclass::DictType       DictType;
+  typedef typename Superclass::StringList     StringList;
+  typedef typename Superclass::SizeListType   SizeListType;
+  typedef typename Superclass::TensorListType TensorListType;
+  typedef typename Superclass::IndexValueType IndexValueType;
+  typedef typename Superclass::IndexListType  IndexListType;
 
   /* Typedefs for validation */
   typedef unsigned long                            CountValueType;
@@ -87,36 +85,43 @@ public:
   typedef itk::ImageRegionConstIterator<ImageType> IteratorType;
 
   /** Set and Get the input references */
-  virtual void SetInputReferences(ImageListType input);
-  ImagePointerType GetInputReference(unsigned int index);
+  virtual void
+  SetInputReferences(ImageListType input);
+  ImagePointerType
+  GetInputReference(unsigned int index);
 
   /** Get the confusion matrix */
-  const ConfMatType GetConfusionMatrix(unsigned int target);
+  const ConfMatType
+  GetConfusionMatrix(unsigned int target);
 
   /** Get the map of classes matrix */
-  const MapOfClassesType GetMapOfClasses(unsigned int target);
+  const MapOfClassesType
+  GetMapOfClasses(unsigned int target);
 
 protected:
   TensorflowMultisourceModelValidate();
-  virtual ~TensorflowMultisourceModelValidate() {};
+  virtual ~TensorflowMultisourceModelValidate(){};
 
-  void GenerateOutputInformation(void);
-  void GenerateData();
-  void ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-      const IndexValueType & batchSize);
+  void
+  GenerateOutputInformation(void);
+  void
+  GenerateData();
+  void
+  ProcessBatch(DictType & inputs, const IndexValueType & sampleStart, const IndexValueType & batchSize);
 
 private:
-  TensorflowMultisourceModelValidate(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowMultisourceModelValidate(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  ImageListType              m_References;              // The references images
+  ImageListType m_References; // The references images
 
   // Read only
-  ConfMatListType            m_ConfusionMatrices;       // Confusion matrix
-  MapOfClassesListType       m_MapsOfClasses;           // Maps of classes
+  ConfMatListType      m_ConfusionMatrices; // Confusion matrix
+  MapOfClassesListType m_MapsOfClasses;     // Maps of classes
 
   // Internal
-  std::vector<MatMapType>    m_ConfMatMaps;             // Accumulators
+  std::vector<MatMapType> m_ConfMatMaps; // Accumulators
 
 }; // end class
 
diff --git a/include/otbTensorflowMultisourceModelValidate.hxx b/include/otbTensorflowMultisourceModelValidate.hxx
index 8ec685ba..a929aa88 100644
--- a/include/otbTensorflowMultisourceModelValidate.hxx
+++ b/include/otbTensorflowMultisourceModelValidate.hxx
@@ -18,82 +18,77 @@ namespace otb
 {
 
 template <class TInputImage>
-TensorflowMultisourceModelValidate<TInputImage>
-::TensorflowMultisourceModelValidate()
- {
- }
+TensorflowMultisourceModelValidate<TInputImage>::TensorflowMultisourceModelValidate()
+{}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateOutputInformation()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateOutputInformation()
+{
   Superclass::GenerateOutputInformation();
 
   // Check that there is some reference
   const unsigned int nbOfRefs = m_References.size();
   if (nbOfRefs == 0)
-    {
+  {
     itkExceptionMacro("No reference is set");
-    }
+  }
 
   // Check the number of references
   SizeListType outputPatchSizes = this->GetOutputExpressionFields();
   if (nbOfRefs != outputPatchSizes.size())
-    {
-    itkExceptionMacro("There is " << nbOfRefs << " references but only " <<
-                      outputPatchSizes.size() << " output patch sizes");
-    }
+  {
+    itkExceptionMacro("There is " << nbOfRefs << " references but only " << outputPatchSizes.size()
+                                  << " output patch sizes");
+  }
 
   // Check reference image infos
-  for (unsigned int i = 0 ; i < nbOfRefs ; i++)
-    {
-    const SizeType outputPatchSize = outputPatchSizes[i];
+  for (unsigned int i = 0; i < nbOfRefs; i++)
+  {
+    const SizeType   outputPatchSize = outputPatchSizes[i];
     const RegionType refRegion = m_References[i]->GetLargestPossibleRegion();
     if (refRegion.GetSize(0) != outputPatchSize[0])
-      {
-      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) <<
-                        " but patch size (x) is " << outputPatchSize[0]);
-      }
+    {
+      itkExceptionMacro("Reference image " << i << " width is " << refRegion.GetSize(0) << " but patch size (x) is "
+                                           << outputPatchSize[0]);
+    }
     if (refRegion.GetSize(1) != this->GetNumberOfSamples() * outputPatchSize[1])
-      {
-      itkExceptionMacro("Reference image " << i << " height is " << refRegion.GetSize(1) <<
-                        " but patch size (y) is " << outputPatchSize[1] <<
-                        " which is not consistent with the number of samples (" << this->GetNumberOfSamples() << ")");
-      }
+    {
+      itkExceptionMacro("Reference image "
+                        << i << " height is " << refRegion.GetSize(1) << " but patch size (y) is " << outputPatchSize[1]
+                        << " which is not consistent with the number of samples (" << this->GetNumberOfSamples()
+                        << ")");
     }
-
- }
+  }
+}
 
 
 /*
  * Set the references images
  */
-template<class TInputImage>
+template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::SetInputReferences(ImageListType input)
- {
+TensorflowMultisourceModelValidate<TInputImage>::SetInputReferences(ImageListType input)
+{
   m_References = input;
- }
+}
 
 /*
  * Retrieve the i-th reference image
  * An exception is thrown if it doesn't exist.
  */
-template<class TInputImage>
+template <class TInputImage>
 typename TensorflowMultisourceModelValidate<TInputImage>::ImagePointerType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetInputReference(unsigned int index)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetInputReference(unsigned int index)
+{
   if (m_References.size <= index || !m_References[index])
-    {
+  {
     itkExceptionMacro("There is no input reference #" << index);
-    }
+  }
 
   return m_References[index];
- }
+}
 
 /**
  * Perform the validation
@@ -103,73 +98,70 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::GenerateData()
- {
+TensorflowMultisourceModelValidate<TInputImage>::GenerateData()
+{
 
   // Temporary images for outputs
   m_ConfusionMatrices.clear();
   m_MapsOfClasses.clear();
   m_ConfMatMaps.clear();
-  for (auto const& ref: m_References)
-    {
-    (void) ref;
+  for (auto const & ref : m_References)
+  {
+    (void)ref;
 
     // New confusion matrix
     MatMapType mat;
     m_ConfMatMaps.push_back(mat);
-    }
+  }
 
   // Run all the batches
   Superclass::GenerateData();
 
   // Compute confusion matrices
-  for (unsigned int i = 0 ; i < m_ConfMatMaps.size() ; i++)
-    {
+  for (unsigned int i = 0; i < m_ConfMatMaps.size(); i++)
+  {
     // Confusion matrix (map) for current target
     MatMapType mat = m_ConfMatMaps[i];
 
     // List all values
     MapOfClassesType values;
-    LabelValueType curVal = 0;
-    for (auto const& ref: mat)
-      {
+    LabelValueType   curVal = 0;
+    for (auto const & ref : mat)
+    {
       if (values.count(ref.first) == 0)
-        {
+      {
         values[ref.first] = curVal;
         curVal++;
-        }
-      for (auto const& in: ref.second)
+      }
+      for (auto const & in : ref.second)
         if (values.count(in.first) == 0)
-          {
+        {
           values[in.first] = curVal;
           curVal++;
-          }
-      }
+        }
+    }
 
     // Build the confusion matrix
     const LabelValueType nValues = values.size();
-    ConfMatType matrix(nValues, nValues);
+    ConfMatType          matrix(nValues, nValues);
     matrix.Fill(0);
-    for (auto const& ref: mat)
-      for (auto const& in: ref.second)
+    for (auto const & ref : mat)
+      for (auto const & in : ref.second)
         matrix[values[ref.first]][values[in.first]] = in.second;
 
     // Add the confusion matrix
     m_ConfusionMatrices.push_back(matrix);
     m_MapsOfClasses.push_back(values);
-
-    }
-
- }
+  }
+}
 
 
 template <class TInputImage>
 void
-TensorflowMultisourceModelValidate<TInputImage>
-::ProcessBatch(DictType & inputs, const IndexValueType & sampleStart,
-    const IndexValueType & batchSize)
- {
+TensorflowMultisourceModelValidate<TInputImage>::ProcessBatch(DictType &             inputs,
+                                                              const IndexValueType & sampleStart,
+                                                              const IndexValueType & batchSize)
+{
   // Populate input tensors
   IndexListType empty;
   this->PopulateInputTensors(inputs, sampleStart, batchSize, empty);
@@ -180,16 +172,16 @@ TensorflowMultisourceModelValidate<TInputImage>
 
   // Perform the validation
   if (outputs.size() != m_References.size())
-    {
-    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, " <<
-                    "but only " << m_References.size() << " reference(s) set");
-    }
+  {
+    itkWarningMacro("There is " << outputs.size() << " outputs returned after session run, "
+                                << "but only " << m_References.size() << " reference(s) set");
+  }
   SizeListType outputEFSizes = this->GetOutputExpressionFields();
-  for (unsigned int refIdx = 0 ; refIdx < outputs.size() ; refIdx++)
-    {
+  for (unsigned int refIdx = 0; refIdx < outputs.size(); refIdx++)
+  {
     // Recopy the chunk
     const SizeType outputFOESize = outputEFSizes[refIdx];
-    IndexType cpyStart;
+    IndexType      cpyStart;
     cpyStart.Fill(0);
     IndexType refRegStart;
     refRegStart.Fill(0);
@@ -216,31 +208,30 @@ TensorflowMultisourceModelValidate<TInputImage>
     IteratorType inIt(img, cpyRegion);
     IteratorType refIt(m_References[refIdx], refRegion);
     for (inIt.GoToBegin(), refIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt, ++refIt)
-      {
+    {
       const int classIn = static_cast<LabelValueType>(inIt.Get()[0]);
       const int classRef = static_cast<LabelValueType>(refIt.Get()[0]);
 
       if (m_ConfMatMaps[refIdx].count(classRef) == 0)
-        {
+      {
         MapType newMap;
         newMap[classIn] = 1;
         m_ConfMatMaps[refIdx][classRef] = newMap;
-        }
+      }
       else
-        {
+      {
         if (m_ConfMatMaps[refIdx][classRef].count(classIn) == 0)
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn] = 1;
-          }
+        }
         else
-          {
+        {
           m_ConfMatMaps[refIdx][classRef][classIn]++;
-          }
         }
       }
     }
-
- }
+  }
+}
 
 /*
  * Get the confusion matrix
@@ -248,17 +239,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::ConfMatType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetConfusionMatrix(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetConfusionMatrix(unsigned int target)
+{
   if (target >= m_ConfusionMatrices.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_ConfusionMatrices.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get confusion matrix #" << target << ". "
+                                                         << "There is only " << m_ConfusionMatrices.size()
+                                                         << " available.");
+  }
 
   return m_ConfusionMatrices[target];
- }
+}
 
 /*
  * Get the map of classes
@@ -266,17 +257,17 @@ TensorflowMultisourceModelValidate<TInputImage>
  */
 template <class TInputImage>
 const typename TensorflowMultisourceModelValidate<TInputImage>::MapOfClassesType
-TensorflowMultisourceModelValidate<TInputImage>
-::GetMapOfClasses(unsigned int target)
- {
+TensorflowMultisourceModelValidate<TInputImage>::GetMapOfClasses(unsigned int target)
+{
   if (target >= m_MapsOfClasses.size())
-    {
-    itkExceptionMacro("Unable to get confusion matrix #" << target << ". " <<
-        "There is only " << m_MapsOfClasses.size() << " available.");
-    }
+  {
+    itkExceptionMacro("Unable to get confusion matrix #" << target << ". "
+                                                         << "There is only " << m_MapsOfClasses.size()
+                                                         << " available.");
+  }
 
   return m_MapsOfClasses[target];
- }
+}
 
 } // end namespace otb
 
diff --git a/include/otbTensorflowSampler.h b/include/otbTensorflowSampler.h
index bd363bc8..4fae38e7 100644
--- a/include/otbTensorflowSampler.h
+++ b/include/otbTensorflowSampler.h
@@ -52,16 +52,14 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TVectorData>
-class ITK_EXPORT TensorflowSampler :
-public itk::ProcessObject
+class ITK_EXPORT TensorflowSampler : public itk::ProcessObject
 {
 public:
-
   /** Standard class typedefs. */
-  typedef TensorflowSampler                       Self;
-  typedef itk::ProcessObject                      Superclass;
-  typedef itk::SmartPointer<Self>                 Pointer;
-  typedef itk::SmartPointer<const Self>           ConstPointer;
+  typedef TensorflowSampler             Self;
+  typedef itk::ProcessObject            Superclass;
+  typedef itk::SmartPointer<Self>       Pointer;
+  typedef itk::SmartPointer<const Self> ConstPointer;
 
   /** Method for creation through the object factory. */
   itkNewMacro(Self);
@@ -70,33 +68,28 @@ public:
   itkTypeMacro(TensorflowSampler, itk::ProcessObject);
 
   /** Images typedefs */
-  typedef TInputImage                             ImageType;
-  typedef typename TInputImage::Pointer           ImagePointerType;
-  typedef typename TInputImage::InternalPixelType InternalPixelType;
-  typedef typename TInputImage::PixelType         PixelType;
-  typedef typename TInputImage::RegionType        RegionType;
-  typedef typename TInputImage::PointType         PointType;
-  typedef typename TInputImage::SizeType          SizeType;
-  typedef typename TInputImage::IndexType         IndexType;
-  typedef typename otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                          ExtractROIMultiFilterType;
-  typedef typename ExtractROIMultiFilterType::Pointer
-                                                  ExtractROIMultiFilterPointerType;
-  typedef typename std::vector<ImagePointerType>  ImagePointerListType;
-  typedef typename std::vector<SizeType>          SizeListType;
-  typedef typename itk::ImageRegionConstIterator<ImageType>
-                                                  IteratorType;
+  typedef TInputImage                                                                ImageType;
+  typedef typename TInputImage::Pointer                                              ImagePointerType;
+  typedef typename TInputImage::InternalPixelType                                    InternalPixelType;
+  typedef typename TInputImage::PixelType                                            PixelType;
+  typedef typename TInputImage::RegionType                                           RegionType;
+  typedef typename TInputImage::PointType                                            PointType;
+  typedef typename TInputImage::SizeType                                             SizeType;
+  typedef typename TInputImage::IndexType                                            IndexType;
+  typedef typename otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType> ExtractROIMultiFilterType;
+  typedef typename ExtractROIMultiFilterType::Pointer                                ExtractROIMultiFilterPointerType;
+  typedef typename std::vector<ImagePointerType>                                     ImagePointerListType;
+  typedef typename std::vector<SizeType>                                             SizeListType;
+  typedef typename itk::ImageRegionConstIterator<ImageType>                          IteratorType;
 
   /** Vector data typedefs */
-  typedef TVectorData                             VectorDataType;
-  typedef typename VectorDataType::Pointer        VectorDataPointer;
-  typedef typename VectorDataType::DataTreeType   DataTreeType;
-  typedef typename itk::PreOrderTreeIterator<DataTreeType>
-                                                  TreeIteratorType;
-  typedef typename VectorDataType::DataNodeType   DataNodeType;
-  typedef typename DataNodeType::Pointer          DataNodePointer;
-  typedef typename DataNodeType::PolygonListPointerType
-                                                  PolygonListPointerType;
+  typedef TVectorData                                      VectorDataType;
+  typedef typename VectorDataType::Pointer                 VectorDataPointer;
+  typedef typename VectorDataType::DataTreeType            DataTreeType;
+  typedef typename itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
+  typedef typename VectorDataType::DataNodeType            DataNodeType;
+  typedef typename DataNodeType::Pointer                   DataNodePointer;
+  typedef typename DataNodeType::PolygonListPointerType    PolygonListPointerType;
 
   /** Set / get parameters */
   itkSetMacro(Field, std::string);
@@ -107,15 +100,18 @@ public:
   itkGetConstMacro(InputVectorData, VectorDataPointer);
 
   /** Set / get image */
-  virtual void PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval);
-  const ImageType* GetInput(unsigned int index);
+  virtual void
+  PushBackInputWithPatchSize(const ImageType * input, SizeType & patchSize, InternalPixelType nodataval);
+  const ImageType *
+  GetInput(unsigned int index);
 
   /** Set / get no-data related parameters */
   itkSetMacro(RejectPatchesWithNodata, bool);
   itkGetMacro(RejectPatchesWithNodata, bool);
 
   /** Do the real work */
-  virtual void Update();
+  virtual void
+  Update();
 
   /** Get outputs */
   itkGetMacro(OutputPatchImages, ImagePointerListType);
@@ -125,18 +121,21 @@ public:
 
 protected:
   TensorflowSampler();
-  virtual ~TensorflowSampler() {};
+  virtual ~TensorflowSampler(){};
 
-  virtual void ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
-  virtual void AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
+  virtual void
+  ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples);
+  virtual void
+  AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents);
 
 private:
-  TensorflowSampler(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowSampler(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  std::string          m_Field;
-  SizeListType         m_PatchSizes;
-  VectorDataPointer    m_InputVectorData;
+  std::string       m_Field;
+  SizeListType      m_PatchSizes;
+  VectorDataPointer m_InputVectorData;
 
   // Read only
   ImagePointerListType m_OutputPatchImages;
@@ -146,7 +145,7 @@ private:
 
   // No data stuff
   std::vector<InternalPixelType> m_NoDataValues;
-  bool                 m_RejectPatchesWithNodata;
+  bool                           m_RejectPatchesWithNodata;
 
 }; // end class
 
diff --git a/include/otbTensorflowSampler.hxx b/include/otbTensorflowSampler.hxx
index 8c0ea745..77558c7b 100644
--- a/include/otbTensorflowSampler.hxx
+++ b/include/otbTensorflowSampler.hxx
@@ -18,36 +18,35 @@ namespace otb
 {
 
 template <class TInputImage, class TVectorData>
-TensorflowSampler<TInputImage, TVectorData>
-::TensorflowSampler()
- {
+TensorflowSampler<TInputImage, TVectorData>::TensorflowSampler()
+{
   m_NumberOfAcceptedSamples = 0;
   m_NumberOfRejectedSamples = 0;
   m_RejectPatchesWithNodata = false;
- }
+}
 
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::PushBackInputWithPatchSize(const ImageType *input, SizeType & patchSize, InternalPixelType nodataval)
- {
-  this->ProcessObject::PushBackInput(const_cast<ImageType*>(input));
+TensorflowSampler<TInputImage, TVectorData>::PushBackInputWithPatchSize(const ImageType * input,
+                                                                        SizeType &        patchSize,
+                                                                        InternalPixelType nodataval)
+{
+  this->ProcessObject::PushBackInput(const_cast<ImageType *>(input));
   m_PatchSizes.push_back(patchSize);
   m_NoDataValues.push_back(nodataval);
- }
+}
 
 template <class TInputImage, class TVectorData>
-const TInputImage*
-TensorflowSampler<TInputImage, TVectorData>
-::GetInput(unsigned int index)
- {
+const TInputImage *
+TensorflowSampler<TInputImage, TVectorData>::GetInput(unsigned int index)
+{
   if (this->GetNumberOfInputs() < 1)
   {
     itkExceptionMacro("Input not set");
   }
 
-  return static_cast<const ImageType*>(this->ProcessObject::GetInput(index));
- }
+  return static_cast<const ImageType *>(this->ProcessObject::GetInput(index));
+}
 
 
 /**
@@ -55,9 +54,10 @@ TensorflowSampler<TInputImage, TVectorData>
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::ResizeImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples)
- {
+TensorflowSampler<TInputImage, TVectorData>::ResizeImage(ImagePointerType & image,
+                                                         SizeType &         patchSize,
+                                                         unsigned int       nbSamples)
+{
   // New image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -71,16 +71,18 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Assign
   image = resizer->GetOutput();
- }
+}
 
 /**
  * Allocate an image given a patch size and a number of samples
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::AllocateImage(ImagePointerType & image, SizeType & patchSize, unsigned int nbSamples, unsigned int nbComponents)
- {
+TensorflowSampler<TInputImage, TVectorData>::AllocateImage(ImagePointerType & image,
+                                                           SizeType &         patchSize,
+                                                           unsigned int       nbSamples,
+                                                           unsigned int       nbComponents)
+{
   // Image region
   RegionType region;
   region.SetSize(0, patchSize[0]);
@@ -91,16 +93,15 @@ TensorflowSampler<TInputImage, TVectorData>
   image->SetNumberOfComponentsPerPixel(nbComponents);
   image->SetRegions(region);
   image->Allocate();
- }
+}
 
 /**
  * Do the work
  */
 template <class TInputImage, class TVectorData>
 void
-TensorflowSampler<TInputImage, TVectorData>
-::Update()
- {
+TensorflowSampler<TInputImage, TVectorData>::Update()
+{
 
   // Check number of inputs
   if (this->GetNumberOfInputs() != m_PatchSizes.size())
@@ -109,8 +110,8 @@ TensorflowSampler<TInputImage, TVectorData>
   }
 
   // Count points
-  unsigned int nTotal = 0;
-  unsigned int geomId = 0;
+  unsigned int     nTotal = 0;
+  unsigned int     geomId = 0;
   TreeIteratorType itVector(m_InputVectorData->GetDataTree());
   itVector.GoToBegin();
   while (!itVector.IsAtEnd())
@@ -146,7 +147,7 @@ TensorflowSampler<TInputImage, TVectorData>
   const unsigned int nbInputs = this->GetNumberOfInputs();
   m_OutputPatchImages.clear();
   m_OutputPatchImages.reserve(nbInputs);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ImagePointerType newImage;
     AllocateImage(newImage, m_PatchSizes[i], nTotal, GetInput(i)->GetNumberOfComponentsPerPixel());
@@ -160,7 +161,7 @@ TensorflowSampler<TInputImage, TVectorData>
   itVector.GoToBegin();
   unsigned long count = 0;
   unsigned long rejected = 0;
-  IndexType labelIndex;
+  IndexType     labelIndex;
   labelIndex[0] = 0;
   PixelType labelPix;
   labelPix.SetSize(1);
@@ -169,13 +170,13 @@ TensorflowSampler<TInputImage, TVectorData>
     if (!itVector.Get()->IsRoot() && !itVector.Get()->IsDocument() && !itVector.Get()->IsFolder())
     {
       DataNodePointer currentGeometry = itVector.Get();
-      PointType point = currentGeometry->GetPoint();
+      PointType       point = currentGeometry->GetPoint();
 
       // Get the label value
       labelPix[0] = static_cast<InternalPixelType>(currentGeometry->GetFieldAsInt(m_Field));
 
       bool hasBeenSampled = true;
-      for (unsigned int i = 0 ; i < nbInputs ; i++)
+      for (unsigned int i = 0; i < nbInputs; i++)
       {
         // Get input
         ImagePointerType inputPtr = const_cast<ImageType *>(this->GetInput(i));
@@ -188,7 +189,7 @@ TensorflowSampler<TInputImage, TVectorData>
         }
         // Check if the sampled patch contains a no-data value
         if (m_RejectPatchesWithNodata && hasBeenSampled)
-          {
+        {
           IndexType outIndex;
           outIndex[0] = 0;
           outIndex[1] = count * m_PatchSizes[i][1];
@@ -196,13 +197,13 @@ TensorflowSampler<TInputImage, TVectorData>
 
           IteratorType it(m_OutputPatchImages[i], region);
           for (it.GoToBegin(); !it.IsAtEnd(); ++it)
-            {
+          {
             PixelType pix = it.Get();
-            for (unsigned int band = 0 ; band < pix.Size() ; band++)
+            for (unsigned int band = 0; band < pix.Size(); band++)
               if (pix[band] == m_NoDataValues[i])
                 hasBeenSampled = false;
-            }
           }
+        }
       } // Next input
       if (hasBeenSampled)
       {
@@ -220,7 +221,6 @@ TensorflowSampler<TInputImage, TVectorData>
 
       // Update progress
       progress.CompletedPixel();
-
     }
 
     ++itVector;
@@ -228,7 +228,7 @@ TensorflowSampler<TInputImage, TVectorData>
 
   // Resize output images
   ResizeImage(m_OutputLabelImage, labelPatchSize, count);
-  for (unsigned int i = 0 ; i < nbInputs ; i++)
+  for (unsigned int i = 0; i < nbInputs; i++)
   {
     ResizeImage(m_OutputPatchImages[i], m_PatchSizes[i], count);
   }
@@ -236,8 +236,7 @@ TensorflowSampler<TInputImage, TVectorData>
   // Update number of samples produced
   m_NumberOfAcceptedSamples = count;
   m_NumberOfRejectedSamples = rejected;
-
- }
+}
 
 } // end namespace otb
 
diff --git a/include/otbTensorflowSamplingUtils.cxx b/include/otbTensorflowSamplingUtils.cxx
index 5cf88f6b..db4d9ea0 100644
--- a/include/otbTensorflowSamplingUtils.cxx
+++ b/include/otbTensorflowSamplingUtils.cxx
@@ -19,13 +19,15 @@ namespace tf
 //
 // Update the distribution of the patch located at the specified location
 //
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist)
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist)
 {
   typename TImage::IndexType index;
-  bool canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
+  bool                       canTransform = inPtr->TransformPhysicalPointToIndex(point, index);
   if (canTransform)
   {
     index[0] -= patchSize[0] / 2;
@@ -38,7 +40,7 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
       // Fill patch
       PropagateRequestedRegion<TImage>(inPtr, inPatchRegion);
 
-      typename itk::ImageRegionConstIterator<TImage> inIt (inPtr, inPatchRegion);
+      typename itk::ImageRegionConstIterator<TImage> inIt(inPtr, inPatchRegion);
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         dist.Update(inIt.Get());
@@ -47,7 +49,6 @@ bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
     }
   }
   return false;
-
 }
 
 
diff --git a/include/otbTensorflowSamplingUtils.h b/include/otbTensorflowSamplingUtils.h
index 585f9013..846b7131 100644
--- a/include/otbTensorflowSamplingUtils.h
+++ b/include/otbTensorflowSamplingUtils.h
@@ -20,77 +20,89 @@ namespace otb
 namespace tf
 {
 
-template<class TImage>
+template <class TImage>
 class Distribution
 {
 public:
   typedef typename TImage::PixelType ValueType;
-  typedef vnl_vector<float> CountsType;
-
-  explicit Distribution(unsigned int nClasses): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, 0))
-  {
-  }
-  Distribution(unsigned int nClasses, float fillValue): m_NbOfClasses(nClasses), m_Dist(CountsType(nClasses, fillValue))
-  {
-  }
-  Distribution(): m_NbOfClasses(2), m_Dist(CountsType(m_NbOfClasses, 0))
-  {
-  }
-  Distribution(const Distribution & other): m_Dist(other.Get()), m_NbOfClasses(m_Dist.size())
-  {
-  }
-  ~Distribution(){}
-
-  void Update(const typename TImage::PixelType & pixel)
+  typedef vnl_vector<float>          CountsType;
+
+  explicit Distribution(unsigned int nClasses)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, 0))
+  {}
+  Distribution(unsigned int nClasses, float fillValue)
+    : m_NbOfClasses(nClasses)
+    , m_Dist(CountsType(nClasses, fillValue))
+  {}
+  Distribution()
+    : m_NbOfClasses(2)
+    , m_Dist(CountsType(m_NbOfClasses, 0))
+  {}
+  Distribution(const Distribution & other)
+    : m_Dist(other.Get())
+    , m_NbOfClasses(m_Dist.size())
+  {}
+  ~Distribution() {}
+
+  void
+  Update(const typename TImage::PixelType & pixel)
   {
     m_Dist[pixel]++;
   }
 
-  void Update(const Distribution & other)
+  void
+  Update(const Distribution & other)
   {
     const CountsType otherDist = other.Get();
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       m_Dist[c] += otherDist[c];
   }
 
-  CountsType Get() const
+  CountsType
+  Get() const
   {
     return m_Dist;
   }
 
-  CountsType GetNormalized() const
+  CountsType
+  GetNormalized() const
   {
-    const float invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
+    const float      invNorm = 1.0 / std::sqrt(dot_product(m_Dist, m_Dist));
     const CountsType normalizedDist = invNorm * m_Dist;
     return normalizedDist;
   }
 
-  float Cosinus(const Distribution & other) const
+  float
+  Cosinus(const Distribution & other) const
   {
     return dot_product(other.GetNormalized(), GetNormalized());
   }
 
-  std::string ToString()
+  std::string
+  ToString()
   {
     std::stringstream ss;
     ss << "\n";
-    for (unsigned int c = 0 ; c < m_NbOfClasses ; c++)
+    for (unsigned int c = 0; c < m_NbOfClasses; c++)
       ss << "\tClass #" << c << " : " << m_Dist[c] << "\n";
     return ss.str();
   }
 
 private:
   unsigned int m_NbOfClasses;
-  CountsType m_Dist;
+  CountsType   m_Dist;
 };
 
 // Update the distribution of the patch located at the specified location
-template<class TImage, class TDistribution>
-bool UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
-    typename TImage::PointType point, typename TImage::SizeType patchSize,
-    TDistribution & dist);
-
-} // namesapce tf
+template <class TImage, class TDistribution>
+bool
+UpdateDistributionFromPatch(const typename TImage::Pointer inPtr,
+                            typename TImage::PointType     point,
+                            typename TImage::SizeType      patchSize,
+                            TDistribution &                dist);
+
+} // namespace tf
 } // namespace otb
 
 #include "otbTensorflowSamplingUtils.cxx"
diff --git a/include/otbTensorflowSource.h b/include/otbTensorflowSource.h
index 1556997f..9bbeed12 100644
--- a/include/otbTensorflowSource.h
+++ b/include/otbTensorflowSource.h
@@ -29,45 +29,43 @@ namespace otb
  * Images must have the same size.
  * This is the common input type used in every OTB-TF applications.
  */
-template<class TImage>
+template <class TImage>
 class TensorflowSource
 {
 public:
   /** Typedefs for images */
-  typedef TImage                                            FloatVectorImageType;
-  typedef typename FloatVectorImageType::Pointer            FloatVectorImagePointerType;
-  typedef typename FloatVectorImageType::InternalPixelType  InternalPixelType;
-  typedef otb::Image<InternalPixelType>                     FloatImageType;
-  typedef typename FloatImageType::SizeType                 SizeType;
+  typedef TImage                                           FloatVectorImageType;
+  typedef typename FloatVectorImageType::Pointer           FloatVectorImagePointerType;
+  typedef typename FloatVectorImageType::InternalPixelType InternalPixelType;
+  typedef otb::Image<InternalPixelType>                    FloatImageType;
+  typedef typename FloatImageType::SizeType                SizeType;
 
   /** Typedefs for image concatenation */
-  typedef otb::ImageList<FloatImageType>                    ImageListType;
-  typedef typename ImageListType::Pointer                   ImageListPointer;
-  typedef ImageListToVectorImageFilter<ImageListType,
-      FloatVectorImageType>                                 ListConcatenerFilterType;
-  typedef typename ListConcatenerFilterType::Pointer        ListConcatenerFilterPointer;
-  typedef MultiToMonoChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    MultiToMonoChannelFilterType;
-  typedef ObjectList<MultiToMonoChannelFilterType>          ExtractROIFilterListType;
-  typedef typename ExtractROIFilterListType::Pointer        ExtractROIFilterListPointer;
-  typedef otb::MultiChannelExtractROI<InternalPixelType,
-      InternalPixelType>                                    ExtractFilterType;
-  typedef otb::ObjectList<FloatVectorImageType>             FloatVectorImageListType;
+  typedef otb::ImageList<FloatImageType>                                     ImageListType;
+  typedef typename ImageListType::Pointer                                    ImageListPointer;
+  typedef ImageListToVectorImageFilter<ImageListType, FloatVectorImageType>  ListConcatenerFilterType;
+  typedef typename ListConcatenerFilterType::Pointer                         ListConcatenerFilterPointer;
+  typedef MultiToMonoChannelExtractROI<InternalPixelType, InternalPixelType> MultiToMonoChannelFilterType;
+  typedef ObjectList<MultiToMonoChannelFilterType>                           ExtractROIFilterListType;
+  typedef typename ExtractROIFilterListType::Pointer                         ExtractROIFilterListPointer;
+  typedef otb::MultiChannelExtractROI<InternalPixelType, InternalPixelType>  ExtractFilterType;
+  typedef otb::ObjectList<FloatVectorImageType>                              FloatVectorImageListType;
 
   // Initialize the source
-  void Set(FloatVectorImageListType * inputList);
+  void
+  Set(FloatVectorImageListType * inputList);
 
   // Get the source output
-  FloatVectorImagePointerType Get();
+  FloatVectorImagePointerType
+  Get();
 
   TensorflowSource();
-  virtual ~TensorflowSource (){};
+  virtual ~TensorflowSource(){};
 
 private:
   ListConcatenerFilterPointer m_Concatener;    // Mono-images stacker
   ImageListPointer            m_List;          // List of mono-images
   ExtractROIFilterListPointer m_ExtractorList; // Mono-images extractors
-
 };
 
 } // end namespace otb
diff --git a/include/otbTensorflowSource.hxx b/include/otbTensorflowSource.hxx
index 2ad57586..2e41253c 100644
--- a/include/otbTensorflowSource.hxx
+++ b/include/otbTensorflowSource.hxx
@@ -21,8 +21,7 @@ namespace otb
 // Constructor
 //
 template <class TImage>
-TensorflowSource<TImage>
-::TensorflowSource()
+TensorflowSource<TImage>::TensorflowSource()
 {}
 
 //
@@ -30,40 +29,38 @@ TensorflowSource<TImage>
 //
 template <class TImage>
 void
-TensorflowSource<TImage>
-::Set(FloatVectorImageListType * inputList)
+TensorflowSource<TImage>::Set(FloatVectorImageListType * inputList)
 {
   // Create one stack for input images list
-  m_Concatener    = ListConcatenerFilterType::New();
-  m_List          = ImageListType::New();
+  m_Concatener = ListConcatenerFilterType::New();
+  m_List = ImageListType::New();
   m_ExtractorList = ExtractROIFilterListType::New();
 
   // Split each input vector image into image
   // and generate an mono channel image list
   inputList->GetNthElement(0)->UpdateOutputInformation();
   SizeType size = inputList->GetNthElement(0)->GetLargestPossibleRegion().GetSize();
-  for( unsigned int i = 0; i < inputList->Size(); i++ )
+  for (unsigned int i = 0; i < inputList->Size(); i++)
   {
     FloatVectorImagePointerType vectIm = inputList->GetNthElement(i);
     vectIm->UpdateOutputInformation();
-    if( size != vectIm->GetLargestPossibleRegion().GetSize() )
+    if (size != vectIm->GetLargestPossibleRegion().GetSize())
     {
       itkGenericExceptionMacro("Input image size number " << i << " mismatch");
     }
 
-    for( unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
+    for (unsigned int j = 0; j < vectIm->GetNumberOfComponentsPerPixel(); j++)
     {
       typename MultiToMonoChannelFilterType::Pointer extractor = MultiToMonoChannelFilterType::New();
-      extractor->SetInput( vectIm );
-      extractor->SetChannel( j+1 );
+      extractor->SetInput(vectIm);
+      extractor->SetChannel(j + 1);
       extractor->UpdateOutputInformation();
-      m_ExtractorList->PushBack( extractor );
-      m_List->PushBack( extractor->GetOutput() );
+      m_ExtractorList->PushBack(extractor);
+      m_List->PushBack(extractor->GetOutput());
     }
   }
-  m_Concatener->SetInput( m_List );
+  m_Concatener->SetInput(m_List);
   m_Concatener->UpdateOutputInformation();
-
 }
 
 //
diff --git a/include/otbTensorflowStreamerFilter.h b/include/otbTensorflowStreamerFilter.h
index 4730d369..fa985d00 100644
--- a/include/otbTensorflowStreamerFilter.h
+++ b/include/otbTensorflowStreamerFilter.h
@@ -26,12 +26,10 @@ namespace otb
  * \ingroup OTBTensorflow
  */
 template <class TInputImage, class TOutputImage>
-class ITK_EXPORT TensorflowStreamerFilter :
-public itk::ImageToImageFilter<TInputImage, TOutputImage>
+class ITK_EXPORT TensorflowStreamerFilter : public itk::ImageToImageFilter<TInputImage, TOutputImage>
 {
 
 public:
-
   /** Standard class typedefs. */
   typedef TensorflowStreamerFilter                           Self;
   typedef itk::ImageToImageFilter<TInputImage, TOutputImage> Superclass;
@@ -51,24 +49,31 @@ public:
   typedef typename ImageType::SizeType              SizeType;
   typedef typename Superclass::InputImageRegionType RegionType;
 
-  typedef TOutputImage                             OutputImageType;
+  typedef TOutputImage OutputImageType;
 
   itkSetMacro(OutputGridSize, SizeType);
   itkGetMacro(OutputGridSize, SizeType);
 
 protected:
   TensorflowStreamerFilter();
-  virtual ~TensorflowStreamerFilter() {};
+  virtual ~TensorflowStreamerFilter(){};
 
-  virtual void UpdateOutputData(itk::DataObject *output){(void) output; this->GenerateData();}
+  virtual void
+  UpdateOutputData(itk::DataObject * output)
+  {
+    (void)output;
+    this->GenerateData();
+  }
 
-  virtual void GenerateData();
+  virtual void
+  GenerateData();
 
 private:
-  TensorflowStreamerFilter(const Self&); //purposely not implemented
-  void operator=(const Self&); //purposely not implemented
+  TensorflowStreamerFilter(const Self &); // purposely not implemented
+  void
+  operator=(const Self &); // purposely not implemented
 
-  SizeType                   m_OutputGridSize;       // Output grid size
+  SizeType m_OutputGridSize; // Output grid size
 
 }; // end class
 
diff --git a/include/otbTensorflowStreamerFilter.hxx b/include/otbTensorflowStreamerFilter.hxx
index 59904a54..3aa1afca 100644
--- a/include/otbTensorflowStreamerFilter.hxx
+++ b/include/otbTensorflowStreamerFilter.hxx
@@ -19,30 +19,28 @@ namespace otb
 {
 
 template <class TInputImage, class TOutputImage>
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::TensorflowStreamerFilter()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::TensorflowStreamerFilter()
+{
   m_OutputGridSize.Fill(1);
- }
+}
 
 /**
  * Compute the output image
  */
 template <class TInputImage, class TOutputImage>
 void
-TensorflowStreamerFilter<TInputImage, TOutputImage>
-::GenerateData()
- {
+TensorflowStreamerFilter<TInputImage, TOutputImage>::GenerateData()
+{
   // Output pointer and requested region
   OutputImageType * outputPtr = this->GetOutput();
-  const RegionType outputReqRegion = outputPtr->GetRequestedRegion();
+  const RegionType  outputReqRegion = outputPtr->GetRequestedRegion();
   outputPtr->SetBufferedRegion(outputReqRegion);
   outputPtr->Allocate();
 
   // Compute the aligned region
   RegionType region;
-  for(unsigned int dim = 0; dim<OutputImageType::ImageDimension; ++dim)
-    {
+  for (unsigned int dim = 0; dim < OutputImageType::ImageDimension; ++dim)
+  {
     // Get corners
     IndexValueType lower = outputReqRegion.GetIndex(dim);
     IndexValueType upper = lower + outputReqRegion.GetSize(dim);
@@ -54,35 +52,34 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
     // Move corners to aligned positions
     lower -= deltaLo;
     if (deltaUp > 0)
-      {
+    {
       upper += m_OutputGridSize[dim] - deltaUp;
-      }
+    }
 
     // Update region
     region.SetIndex(dim, lower);
     region.SetSize(dim, upper - lower);
-
-    }
+  }
 
   // Compute the number of subregions to process
   const unsigned int nbTilesX = region.GetSize(0) / m_OutputGridSize[0];
   const unsigned int nbTilesY = region.GetSize(1) / m_OutputGridSize[1];
 
   // Progress
-  itk::ProgressReporter progress(this, 0, nbTilesX*nbTilesY);
+  itk::ProgressReporter progress(this, 0, nbTilesX * nbTilesY);
 
   // For each tile, propagate the input region and recopy the output
-  ImageType * inputImage = static_cast<ImageType * >(  Superclass::ProcessObject::GetInput(0) );
+  ImageType *  inputImage = static_cast<ImageType *>(Superclass::ProcessObject::GetInput(0));
   unsigned int tx, ty;
-  RegionType subRegion;
+  RegionType   subRegion;
   subRegion.SetSize(m_OutputGridSize);
   for (ty = 0; ty < nbTilesY; ty++)
   {
-    subRegion.SetIndex(1, ty*m_OutputGridSize[1] + region.GetIndex(1));
+    subRegion.SetIndex(1, ty * m_OutputGridSize[1] + region.GetIndex(1));
     for (tx = 0; tx < nbTilesX; tx++)
     {
       // Update the input subregion
-      subRegion.SetIndex(0, tx*m_OutputGridSize[0] + region.GetIndex(0));
+      subRegion.SetIndex(0, tx * m_OutputGridSize[0] + region.GetIndex(0));
 
       // The actual region to copy
       RegionType cpyRegion(subRegion);
@@ -94,12 +91,12 @@ TensorflowStreamerFilter<TInputImage, TOutputImage>
       inputImage->UpdateOutputData();
 
       // Copy the subregion to output
-      itk::ImageAlgorithm::Copy( inputImage, outputPtr, cpyRegion, cpyRegion );
+      itk::ImageAlgorithm::Copy(inputImage, outputPtr, cpyRegion, cpyRegion);
 
       progress.CompletedPixel();
     }
   }
- }
+}
 
 
 } // end namespace otb
-- 
GitLab


From 193d6fe5be51b69f04a54e26ad26c987b9b088c8 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Tue, 5 Apr 2022 20:33:41 +0200
Subject: [PATCH 06/12] FIX: conflicts between develop and master

---
 app/otbDensePolygonClassStatistics.cxx     | 292 ++++++++---------
 app/otbImageClassifierFromDeepFeatures.cxx |  59 ++--
 app/otbLabelImageSampleSelection.cxx       | 246 +++++++-------
 app/otbPatchesExtraction.cxx               | 156 +++------
 app/otbPatchesSelection.cxx                | 297 ++++++-----------
 app/otbTensorflowModelServe.cxx            | 246 +++++++-------
 app/otbTensorflowModelTrain.cxx            | 355 ++++++++++-----------
 app/otbTrainClassifierFromDeepFeatures.cxx | 119 +++----
 8 files changed, 798 insertions(+), 972 deletions(-)

diff --git a/app/otbDensePolygonClassStatistics.cxx b/app/otbDensePolygonClassStatistics.cxx
index 1b9b53f6..fa7c2701 100644
--- a/app/otbDensePolygonClassStatistics.cxx
+++ b/app/otbDensePolygonClassStatistics.cxx
@@ -34,11 +34,10 @@ namespace otb
 namespace Wrapper
 {
 /** Utility function to negate std::isalnum */
-bool
-IsNotAlphaNum(char c)
-{
+bool IsNotAlphaNum(char c)
+  {
   return !std::isalnum(c);
-}
+  }
 
 class DensePolygonClassStatistics : public Application
 {
@@ -54,53 +53,54 @@ public:
   itkTypeMacro(DensePolygonClassStatistics, Application);
 
   /** DataObjects typedef */
-  typedef UInt32ImageType LabelImageType;
-  typedef UInt8ImageType  MaskImageType;
-  typedef VectorData<>    VectorDataType;
+  typedef UInt32ImageType                           LabelImageType;
+  typedef UInt8ImageType                            MaskImageType;
+  typedef VectorData<>                              VectorDataType;
 
   /** ProcessObjects typedef */
-  typedef otb::VectorDataIntoImageProjectionFilter<VectorDataType, FloatVectorImageType> VectorDataReprojFilterType;
+  typedef otb::VectorDataIntoImageProjectionFilter<VectorDataType,
+      FloatVectorImageType>                                                       VectorDataReprojFilterType;
 
-  typedef otb::VectorDataToLabelImageFilter<VectorDataType, LabelImageType> RasterizeFilterType;
+  typedef otb::VectorDataToLabelImageFilter<VectorDataType, LabelImageType>       RasterizeFilterType;
 
   typedef otb::VectorImage<MaskImageType::PixelType>                              InternalMaskImageType;
   typedef otb::ImageToNoDataMaskFilter<FloatVectorImageType, MaskImageType>       NoDataMaskFilterType;
   typedef otb::ImageToVectorImageCastFilter<MaskImageType, InternalMaskImageType> CastFilterType;
 
-  typedef otb::StreamingStatisticsMapFromLabelImageFilter<InternalMaskImageType, LabelImageType> StatsFilterType;
+  typedef otb::StreamingStatisticsMapFromLabelImageFilter<InternalMaskImageType,
+      LabelImageType>                                                             StatsFilterType;
 
-  typedef otb::StatisticsXMLFileWriter<FloatVectorImageType::PixelType> StatWriterType;
+  typedef otb::StatisticsXMLFileWriter<FloatVectorImageType::PixelType>           StatWriterType;
 
-  void
-  DoInit()
+  void DoInit()
   {
     SetName("DensePolygonClassStatistics");
     SetDescription("Computes statistics on a training polygon set.");
 
     // Documentation
     SetDocLongDescription("The application processes a dense set of polygons "
-                          "intended for training (they should have a field giving the associated "
-                          "class). The geometries are analyzed against a support image to compute "
-                          "statistics : \n"
-                          "  - number of samples per class\n"
-                          "  - number of samples per geometry\n");
+      "intended for training (they should have a field giving the associated "
+      "class). The geometries are analyzed against a support image to compute "
+      "statistics : \n"
+      "  - number of samples per class\n"
+      "  - number of samples per geometry\n");
     SetDocLimitations("None");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
-    AddParameter(ParameterType_InputImage, "in", "Input image");
+    AddParameter(ParameterType_InputImage,  "in",   "Input image");
     SetParameterDescription("in", "Support image that will be classified");
-
+    
     AddParameter(ParameterType_InputVectorData, "vec", "Input vectors");
-    SetParameterDescription("vec", "Input geometries to analyze");
-
+    SetParameterDescription("vec","Input geometries to analyze");
+    
     AddParameter(ParameterType_OutputFilename, "out", "Output XML statistics file");
-    SetParameterDescription("out", "Output file to store statistics (XML format)");
+    SetParameterDescription("out","Output file to store statistics (XML format)");
 
     AddParameter(ParameterType_ListView, "field", "Field Name");
-    SetParameterDescription("field", "Name of the field carrying the class number in the input vectors.");
-    SetListViewSingleSelectionMode("field", true);
+    SetParameterDescription("field","Name of the field carrying the class number in the input vectors.");
+    SetListViewSingleSelectionMode("field",true);
 
     ElevationParametersHandler::AddElevationParameters(this, "elev");
 
@@ -110,154 +110,158 @@ public:
     SetDocExampleParameterValue("in", "support_image.tif");
     SetDocExampleParameterValue("vec", "variousVectors.shp");
     SetDocExampleParameterValue("field", "label");
-    SetDocExampleParameterValue("out", "polygonStat.xml");
+    SetDocExampleParameterValue("out","polygonStat.xml");
+
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
 
-    // Retrieve the field name
-    std::vector<int> selectedCFieldIdx = GetSelectedItems("field");
+  // Retrieve the field name
+  std::vector<int> selectedCFieldIdx = GetSelectedItems("field");
 
-    if (selectedCFieldIdx.empty())
+  if(selectedCFieldIdx.empty())
     {
-      otbAppLogFATAL(<< "No field has been selected for data labelling!");
+    otbAppLogFATAL(<<"No field has been selected for data labelling!");
     }
 
-    std::vector<std::string> cFieldNames = GetChoiceNames("field");
-    std::string              fieldName = cFieldNames[selectedCFieldIdx.front()];
-
-    otb::Wrapper::ElevationParametersHandler::SetupDEMHandlerFromElevationParameters(this, "elev");
-
-    // Get inputs
-    FloatVectorImageType::Pointer xs = GetParameterImage("in");
-    VectorDataType *              shp = GetParameterVectorData("vec");
-
-    // Reproject vector data
-    m_VectorDataReprojectionFilter = VectorDataReprojFilterType::New();
-    m_VectorDataReprojectionFilter->SetInputVectorData(shp);
-    m_VectorDataReprojectionFilter->SetInputImage(xs);
-    m_VectorDataReprojectionFilter->Update();
-
-    // Internal no-data value
-    const LabelImageType::ValueType intNoData = itk::NumericTraits<LabelImageType::ValueType>::max();
-
-    // Rasterize vector data (geometry ID)
-    m_RasterizeFIDFilter = RasterizeFilterType::New();
-    m_RasterizeFIDFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-    m_RasterizeFIDFilter->SetOutputOrigin(xs->GetOrigin());
-    m_RasterizeFIDFilter->SetOutputSpacing(xs->GetSignedSpacing());
-    m_RasterizeFIDFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-    m_RasterizeFIDFilter->SetBurnAttribute("________"); // Trick to get the polygon ID
-    m_RasterizeFIDFilter->SetGlobalWarningDisplay(false);
-    m_RasterizeFIDFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-    m_RasterizeFIDFilter->SetBackgroundValue(intNoData);
-    m_RasterizeFIDFilter->SetDefaultBurnValue(0);
-
-    // Rasterize vector data (geometry class)
-    m_RasterizeClassFilter = RasterizeFilterType::New();
-    m_RasterizeClassFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-    m_RasterizeClassFilter->SetOutputOrigin(xs->GetOrigin());
-    m_RasterizeClassFilter->SetOutputSpacing(xs->GetSignedSpacing());
-    m_RasterizeClassFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-    m_RasterizeClassFilter->SetBurnAttribute(fieldName);
-    m_RasterizeClassFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-    m_RasterizeClassFilter->SetBackgroundValue(intNoData);
-    m_RasterizeClassFilter->SetDefaultBurnValue(0);
-
-    // No data mask
-    m_NoDataFilter = NoDataMaskFilterType::New();
-    m_NoDataFilter->SetInput(xs);
-    m_NoDataCastFilter = CastFilterType::New();
-    m_NoDataCastFilter->SetInput(m_NoDataFilter->GetOutput());
-
-    // Stats (geometry ID)
-    m_FIDStatsFilter = StatsFilterType::New();
-    m_FIDStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-    m_FIDStatsFilter->SetInputLabelImage(m_RasterizeFIDFilter->GetOutput());
-    m_FIDStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-    AddProcess(m_FIDStatsFilter->GetStreamer(), "Computing number of samples per vector");
-    m_FIDStatsFilter->Update();
-
-    // Stats (geometry class)
-    m_ClassStatsFilter = StatsFilterType::New();
-    m_ClassStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-    m_ClassStatsFilter->SetInputLabelImage(m_RasterizeClassFilter->GetOutput());
-    m_ClassStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-    AddProcess(m_ClassStatsFilter->GetStreamer(), "Computing number of samples per class");
-    m_ClassStatsFilter->Update();
-
-    // Remove the no-data entries
-    StatsFilterType::LabelPopulationMapType fidMap = m_FIDStatsFilter->GetLabelPopulationMap();
-    StatsFilterType::LabelPopulationMapType classMap = m_ClassStatsFilter->GetLabelPopulationMap();
-    fidMap.erase(intNoData);
-    classMap.erase(intNoData);
-
-    m_StatWriter = StatWriterType::New();
-    m_StatWriter->SetFileName(this->GetParameterString("out"));
-    m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerClass", classMap);
-    m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerVector", fidMap);
-    m_StatWriter->Update();
+  std::vector<std::string> cFieldNames = GetChoiceNames("field");  
+  std::string fieldName = cFieldNames[selectedCFieldIdx.front()];
+
+  otb::Wrapper::ElevationParametersHandler::SetupDEMHandlerFromElevationParameters(this,"elev");
+
+  // Get inputs
+  FloatVectorImageType::Pointer xs = GetParameterImage("in");
+  VectorDataType* shp = GetParameterVectorData("vec");
+
+  // Reproject vector data
+  m_VectorDataReprojectionFilter = VectorDataReprojFilterType::New();
+  m_VectorDataReprojectionFilter->SetInputVectorData(shp);
+  m_VectorDataReprojectionFilter->SetInputImage(xs);
+  m_VectorDataReprojectionFilter->Update();
+
+  // Internal no-data value
+  const LabelImageType::ValueType intNoData =
+      itk::NumericTraits<LabelImageType::ValueType>::max();
+
+  // Rasterize vector data (geometry ID)
+  m_RasterizeFIDFilter = RasterizeFilterType::New();
+  m_RasterizeFIDFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
+  m_RasterizeFIDFilter->SetOutputOrigin(xs->GetOrigin());
+  m_RasterizeFIDFilter->SetOutputSpacing(xs->GetSignedSpacing());
+  m_RasterizeFIDFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
+  m_RasterizeFIDFilter->SetBurnAttribute("________"); // Trick to get the polygon ID
+  m_RasterizeFIDFilter->SetGlobalWarningDisplay(false);
+  m_RasterizeFIDFilter->SetOutputProjectionRef(xs->GetProjectionRef());
+  m_RasterizeFIDFilter->SetBackgroundValue(intNoData);
+  m_RasterizeFIDFilter->SetDefaultBurnValue(0);
+
+  // Rasterize vector data (geometry class)
+  m_RasterizeClassFilter = RasterizeFilterType::New();
+  m_RasterizeClassFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
+  m_RasterizeClassFilter->SetOutputOrigin(xs->GetOrigin());
+  m_RasterizeClassFilter->SetOutputSpacing(xs->GetSignedSpacing());
+  m_RasterizeClassFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
+  m_RasterizeClassFilter->SetBurnAttribute(fieldName);
+  m_RasterizeClassFilter->SetOutputProjectionRef(xs->GetProjectionRef());
+  m_RasterizeClassFilter->SetBackgroundValue(intNoData);
+  m_RasterizeClassFilter->SetDefaultBurnValue(0);
+
+  // No data mask
+  m_NoDataFilter = NoDataMaskFilterType::New();
+  m_NoDataFilter->SetInput(xs);
+  m_NoDataCastFilter = CastFilterType::New();
+  m_NoDataCastFilter->SetInput(m_NoDataFilter->GetOutput());
+
+  // Stats (geometry ID)
+  m_FIDStatsFilter = StatsFilterType::New();
+  m_FIDStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
+  m_FIDStatsFilter->SetInputLabelImage(m_RasterizeFIDFilter->GetOutput());
+  m_FIDStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
+  AddProcess(m_FIDStatsFilter->GetStreamer(), "Computing number of samples per vector");
+  m_FIDStatsFilter->Update();
+
+  // Stats (geometry class)
+  m_ClassStatsFilter = StatsFilterType::New();
+  m_ClassStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
+  m_ClassStatsFilter->SetInputLabelImage(m_RasterizeClassFilter->GetOutput());
+  m_ClassStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
+  AddProcess(m_ClassStatsFilter->GetStreamer(), "Computing number of samples per class");
+  m_ClassStatsFilter->Update();
+
+  // Remove the no-data entries
+  StatsFilterType::LabelPopulationMapType fidMap = m_FIDStatsFilter->GetLabelPopulationMap();
+  StatsFilterType::LabelPopulationMapType classMap = m_ClassStatsFilter->GetLabelPopulationMap();
+  fidMap.erase(intNoData);
+  classMap.erase(intNoData);
+
+  m_StatWriter = StatWriterType::New();
+  m_StatWriter->SetFileName(this->GetParameterString("out"));
+  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerClass", classMap);
+  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerVector", fidMap);
+  m_StatWriter->Update();
+
   }
 
-  void
-  DoUpdateParameters()
+  void DoUpdateParameters()
   {
-    if (HasValue("vec"))
-    {
-      std::string              vectorFile = GetParameterString("vec");
-      ogr::DataSource::Pointer ogrDS = ogr::DataSource::New(vectorFile, ogr::DataSource::Modes::Read);
-      ogr::Layer               layer = ogrDS->GetLayer(0);
-      ogr::Feature             feature = layer.ogr().GetNextFeature();
+     if (HasValue("vec"))
+      {
+      std::string vectorFile = GetParameterString("vec");
+      ogr::DataSource::Pointer ogrDS =
+        ogr::DataSource::New(vectorFile, ogr::DataSource::Modes::Read);
+      ogr::Layer layer = ogrDS->GetLayer(0);
+      ogr::Feature feature = layer.ogr().GetNextFeature();
 
       ClearChoices("field");
 
-      for (int iField = 0; iField < feature.ogr().GetFieldCount(); iField++)
-      {
+      for(int iField=0; iField<feature.ogr().GetFieldCount(); iField++)
+        {
         std::string key, item = feature.ogr().GetFieldDefnRef(iField)->GetNameRef();
         key = item;
-        std::string::iterator end = std::remove_if(key.begin(), key.end(), IsNotAlphaNum);
+        std::string::iterator end = std::remove_if(key.begin(),key.end(),IsNotAlphaNum);
         std::transform(key.begin(), end, key.begin(), tolower);
 
         OGRFieldType fieldType = feature.ogr().GetFieldDefnRef(iField)->GetType();
 
-        if (fieldType == OFTString || fieldType == OFTInteger || fieldType == OFTInteger64)
-        {
-          std::string tmpKey = "field." + key.substr(0, end - key.begin());
-          AddChoice(tmpKey, item);
+        if(fieldType == OFTString || fieldType == OFTInteger || fieldType == OFTInteger64)
+          {
+          std::string tmpKey="field."+key.substr(0, end - key.begin());
+          AddChoice(tmpKey,item);
+          }
         }
       }
-    }
-
-    // Check that the extension of the output parameter is XML (mandatory for
-    // StatisticsXMLFileWriter)
-    // Check it here to trigger the error before polygons analysis
-
-    if (HasValue("out"))
-    {
-      // Store filename extension
-      // Check that the right extension is given : expected .xml
-      const std::string extension = itksys::SystemTools::GetFilenameLastExtension(this->GetParameterString("out"));
 
-      if (itksys::SystemTools::LowerCase(extension) != ".xml")
-      {
-        otbAppLogFATAL(<< extension << " is a wrong extension for parameter \"out\": Expected .xml");
-      }
-    }
+     // Check that the extension of the output parameter is XML (mandatory for
+     // StatisticsXMLFileWriter)
+     // Check it here to trigger the error before polygons analysis
+
+     if (HasValue("out"))
+       {
+       // Store filename extension
+       // Check that the right extension is given : expected .xml
+       const std::string extension = itksys::SystemTools::GetFilenameLastExtension(this->GetParameterString("out"));
+
+       if (itksys::SystemTools::LowerCase(extension) != ".xml")
+         {
+         otbAppLogFATAL( << extension << " is a wrong extension for parameter \"out\": Expected .xml" );
+         }
+       }
   }
 
 
+
 private:
   // Filters
   VectorDataReprojFilterType::Pointer m_VectorDataReprojectionFilter;
-  RasterizeFilterType::Pointer        m_RasterizeFIDFilter;
-  RasterizeFilterType::Pointer        m_RasterizeClassFilter;
-  NoDataMaskFilterType::Pointer       m_NoDataFilter;
-  CastFilterType::Pointer             m_NoDataCastFilter;
-  StatsFilterType::Pointer            m_FIDStatsFilter;
-  StatsFilterType::Pointer            m_ClassStatsFilter;
-  StatWriterType::Pointer             m_StatWriter;
+  RasterizeFilterType::Pointer m_RasterizeFIDFilter;
+  RasterizeFilterType::Pointer m_RasterizeClassFilter;
+  NoDataMaskFilterType::Pointer m_NoDataFilter;
+  CastFilterType::Pointer m_NoDataCastFilter;
+  StatsFilterType::Pointer m_FIDStatsFilter;
+  StatsFilterType::Pointer m_ClassStatsFilter;
+  StatWriterType::Pointer m_StatWriter;
+
 };
 
 } // end of namespace Wrapper
diff --git a/app/otbImageClassifierFromDeepFeatures.cxx b/app/otbImageClassifierFromDeepFeatures.cxx
index 3760f587..f3ffd273 100644
--- a/app/otbImageClassifierFromDeepFeatures.cxx
+++ b/app/otbImageClassifierFromDeepFeatures.cxx
@@ -34,23 +34,23 @@ class ImageClassifierFromDeepFeatures : public CompositeApplication
 {
 public:
   /** Standard class typedefs. */
-  typedef ImageClassifierFromDeepFeatures Self;
-  typedef Application                     Superclass;
-  typedef itk::SmartPointer<Self>         Pointer;
-  typedef itk::SmartPointer<const Self>   ConstPointer;
+  typedef ImageClassifierFromDeepFeatures              Self;
+  typedef Application                         Superclass;
+  typedef itk::SmartPointer<Self>             Pointer;
+  typedef itk::SmartPointer<const Self>       ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(ImageClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
 
 private:
+
   //
   // Add an input source, which includes:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void
-  AddAnInputImage(int inputNumber = 0)
+  void AddAnInputImage(int inputNumber = 0)
   {
     inputNumber++;
 
@@ -64,8 +64,7 @@ private:
   }
 
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     SetName("ImageClassifierFromDeepFeatures");
@@ -82,48 +81,48 @@ private:
     ClearApplications();
 
     // Add applications
-    AddApplication("ImageClassifier", "classif", "Images classifier");
-    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model");
+    AddApplication("ImageClassifier",      "classif", "Images classifier"  );
+    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model" );
 
     // Model shared parameters
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources(); i++)
+    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
     {
       AddAnInputImage(i);
     }
-    ShareParameter("deepmodel", "tfmodel.model", "Deep net model parameters", "Deep net model parameters");
-    ShareParameter("output", "tfmodel.output", "Deep net outputs parameters", "Deep net outputs parameters");
-    ShareParameter("optim",
-                   "tfmodel.optim",
-                   "This group of parameters allows optimization of processing time",
-                   "This group of parameters allows optimization of processing time");
+    ShareParameter("deepmodel",  "tfmodel.model",
+        "Deep net model parameters",      "Deep net model parameters");
+    ShareParameter("output",     "tfmodel.output",
+        "Deep net outputs parameters",
+        "Deep net outputs parameters");
+    ShareParameter("optim", "tfmodel.optim",
+        "This group of parameters allows optimization of processing time",
+        "This group of parameters allows optimization of processing time");
 
     // Classify shared parameters
-    ShareParameter("model", "classif.model", "Model file", "Model file");
-    ShareParameter("imstat", "classif.imstat", "Statistics file", "Statistics file");
-    ShareParameter("nodatalabel", "classif.nodatalabel", "Label mask value", "Label mask value");
-    ShareParameter("out", "classif.out", "Output image", "Output image");
-    ShareParameter("confmap", "classif.confmap", "Confidence map image", "Confidence map image");
-    ShareParameter("ram", "classif.ram", "Ram", "Ram");
+    ShareParameter("model"      , "classif.model"      , "Model file"          , "Model file"          );
+    ShareParameter("imstat"     , "classif.imstat"     , "Statistics file"     , "Statistics file"     );
+    ShareParameter("nodatalabel", "classif.nodatalabel", "Label mask value"    , "Label mask value"    );
+    ShareParameter("out"        , "classif.out"        , "Output image"        , "Output image"        );
+    ShareParameter("confmap"    , "classif.confmap"    , "Confidence map image", "Confidence map image");
+    ShareParameter("ram"        , "classif.ram"        , "Ram"                 , "Ram"                 );
   }
 
-  void
-  DoUpdateParameters()
+  void DoUpdateParameters()
   {
     UpdateInternalParameters("classif");
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
     ExecuteInternal("tfmodel");
-    GetInternalApplication("classif")->SetParameterInputImage(
-      "in", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
+    GetInternalApplication("classif")->SetParameterInputImage("in", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
     UpdateInternalParameters("classif");
     ExecuteInternal("classif");
   }
+
 };
 } // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT(otb::Wrapper::ImageClassifierFromDeepFeatures)
+OTB_APPLICATION_EXPORT( otb::Wrapper::ImageClassifierFromDeepFeatures )
diff --git a/app/otbLabelImageSampleSelection.cxx b/app/otbLabelImageSampleSelection.cxx
index f0d2c03d..50396fa0 100644
--- a/app/otbLabelImageSampleSelection.cxx
+++ b/app/otbLabelImageSampleSelection.cxx
@@ -35,62 +35,59 @@ class LabelImageSampleSelection : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef LabelImageSampleSelection     Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
+  typedef LabelImageSampleSelection           Self;
+  typedef Application                         Superclass;
+  typedef itk::SmartPointer<Self>             Pointer;
+  typedef itk::SmartPointer<const Self>       ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(LabelImageSampleSelection, Application);
 
   /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType            DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
-  typedef VectorDataType::DataNodeType            DataNodeType;
-  typedef DataNodeType::Pointer                   DataNodePointer;
+  typedef VectorDataType::DataTreeType                 DataTreeType;
+  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
+  typedef VectorDataType::DataNodeType                 DataNodeType;
+  typedef DataNodeType::Pointer                        DataNodePointer;
 
   /** typedefs */
-  typedef Int16ImageType LabelImageType;
-  typedef unsigned int   IndexValueType;
+  typedef Int16ImageType                               LabelImageType;
+  typedef unsigned int                                 IndexValueType;
 
-  void
-  DoUpdateParameters()
-  {}
+  void DoUpdateParameters()
+  {
+  }
 
   /*
    * Display the percentage
    */
-  void
-  ShowProgress(unsigned int count, unsigned int total, unsigned int step = 1000)
+  void ShowProgress(unsigned int count, unsigned int total, unsigned int step = 1000)
   {
     if (count % step == 0)
     {
-      std::cout << std::setprecision(3) << "\r" << (100.0 * count / (float)total) << "%      " << std::flush;
+      std::cout << std::setprecision(3) << "\r" << (100.0 * count / (float) total) << "%      " << std::flush;
     }
   }
 
-  void
-  ShowProgressDone()
+  void ShowProgressDone()
   {
     std::cout << "\rDone      " << std::flush;
     std::cout << std::endl;
   }
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     // Documentation
     SetName("LabelImageSampleSelection");
     SetDescription("This application extracts points from an input label image. "
-                   "This application is like \"SampleSelection\", but uses an input label "
-                   "image, rather than an input vector data.");
+        "This application is like \"SampleSelection\", but uses an input label "
+        "image, rather than an input vector data.");
     SetDocLongDescription("This application produces a vector data containing "
-                          "a set of points centered on the pixels of the input label image. "
-                          "The user can control the number of points. The default strategy consists "
-                          "in producing the same number of points in each class. If one class has a "
-                          "smaller number of points than requested, this one is adjusted.");
+        "a set of points centered on the pixels of the input label image. "
+        "The user can control the number of points. The default strategy consists "
+        "in producing the same number of points in each class. If one class has a "
+        "smaller number of points than requested, this one is adjusted.");
 
     SetDocAuthors("Remi Cresson");
 
@@ -100,41 +97,39 @@ public:
     // Strategy
     AddParameter(ParameterType_Choice, "strategy", "Sampling strategy");
 
-    AddChoice("strategy.constant", "Set the same samples counts for all classes");
-    SetParameterDescription("strategy.constant", "Set the same samples counts for all classes");
+    AddChoice("strategy.constant","Set the same samples counts for all classes");
+    SetParameterDescription("strategy.constant","Set the same samples counts for all classes");
 
     AddParameter(ParameterType_Int, "strategy.constant.nb", "Number of samples for all classes");
     SetParameterDescription("strategy.constant.nb", "Number of samples for all classes");
-    SetMinimumParameterIntValue("strategy.constant.nb", 1);
-    SetDefaultParameterInt("strategy.constant.nb", 1000);
+    SetMinimumParameterIntValue("strategy.constant.nb",1);
+    SetDefaultParameterInt("strategy.constant.nb",1000);
 
-    AddChoice("strategy.total", "Set the total number of samples to generate, and use class proportions.");
-    SetParameterDescription("strategy.total",
-                            "Set the total number of samples to generate, and use class proportions.");
-    AddParameter(ParameterType_Int, "strategy.total.v", "The number of samples to generate");
-    SetParameterDescription("strategy.total.v", "The number of samples to generate");
-    SetMinimumParameterIntValue("strategy.total.v", 1);
-    SetDefaultParameterInt("strategy.total.v", 1000);
+    AddChoice("strategy.total","Set the total number of samples to generate, and use class proportions.");
+    SetParameterDescription("strategy.total","Set the total number of samples to generate, and use class proportions.");
+    AddParameter(ParameterType_Int,"strategy.total.v","The number of samples to generate");
+    SetParameterDescription("strategy.total.v","The number of samples to generate");
+    SetMinimumParameterIntValue("strategy.total.v",1);
+    SetDefaultParameterInt("strategy.total.v",1000);
 
-    AddChoice("strategy.smallest", "Set same number of samples for all classes, with the smallest class fully sampled");
-    SetParameterDescription("strategy.smallest",
-                            "Set same number of samples for all classes, with the smallest class fully sampled");
+    AddChoice("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
+    SetParameterDescription("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
 
-    AddChoice("strategy.all", "Take all samples");
-    SetParameterDescription("strategy.all", "Take all samples");
+    AddChoice("strategy.all","Take all samples");
+    SetParameterDescription("strategy.all","Take all samples");
 
     // Default strategy : smallest
-    SetParameterString("strategy", "constant");
+    SetParameterString("strategy","constant");
 
     // Input no-data value
     AddParameter(ParameterType_Int, "nodata", "nodata value");
-    MandatoryOn("nodata");
-    SetDefaultParameterInt("nodata", -1);
+    MandatoryOn                    ("nodata");
+    SetDefaultParameterInt         ("nodata", -1);
 
     // Padding
     AddParameter(ParameterType_Int, "pad", "padding, in pixels");
-    SetDefaultParameterInt("pad", 0);
-    MandatoryOff("pad");
+    SetDefaultParameterInt         ("pad", 0);
+    MandatoryOff                   ("pad");
 
     // Output points
     AddParameter(ParameterType_OutputVectorData, "outvec", "output set of points");
@@ -144,20 +139,19 @@ public:
     SetDocExampleParameterValue("outvec", "terrain_truth_points_sel.sqlite");
 
     AddRAMParameter();
+
   }
 
 
-  void
-  DoExecute()
+  void DoExecute()
   {
 
     // Count the number of pixels in each class
     const LabelImageType::InternalPixelType MAX_NB_OF_CLASSES =
-      itk::NumericTraits<LabelImageType::InternalPixelType>::max();
-    ;
+        itk::NumericTraits<LabelImageType::InternalPixelType>::max();
     LabelImageType::InternalPixelType class_begin = MAX_NB_OF_CLASSES;
     LabelImageType::InternalPixelType class_end = 0;
-    vnl_vector<IndexValueType>        tmp_number_of_samples(MAX_NB_OF_CLASSES, 0);
+    vnl_vector<IndexValueType> tmp_number_of_samples(MAX_NB_OF_CLASSES, 0);
 
     otbAppLogINFO("Computing number of pixels in each class");
 
@@ -167,10 +161,10 @@ public:
     m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
 
     // We pad the image, if this is requested by the user
-    LabelImageType::Pointer    inputImage = GetParameterInt16Image("inref");
+    LabelImageType::Pointer inputImage = GetParameterInt16Image("inref");
     LabelImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
     entireRegion.ShrinkByRadius(GetParameterInt("pad"));
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion);
+    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
 
     // Get nodata value
     const LabelImageType::InternalPixelType nodata = GetParameterInt("nodata");
@@ -181,7 +175,7 @@ public:
     {
       LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt(inputImage, streamRegion);
+      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
         LabelImageType::InternalPixelType pixVal = inIt.Get();
@@ -210,13 +204,14 @@ public:
     // Number of samples in each class (target)
     vnl_vector<IndexValueType> target_number_of_samples(number_of_classes, 0);
 
-    otbAppLogINFO("Number of classes: " << number_of_classes << " starting from " << class_begin << " to " << class_end
-                                        << " (no-data is " << nodata << ")");
-    otbAppLogINFO("Number of pixels in each class: " << number_of_samples);
+    otbAppLogINFO( "Number of classes: " << number_of_classes <<
+        " starting from " << class_begin <<
+        " to " << class_end << " (no-data is " << nodata << ")");
+    otbAppLogINFO( "Number of pixels in each class: " << number_of_samples );
 
     // Check the smallest number of samples amongst classes
     IndexValueType min_elem_in_class = itk::NumericTraits<IndexValueType>::max();
-    for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
+    for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
       min_elem_in_class = std::min(min_elem_in_class, number_of_samples[classIdx]);
 
     // If one class is empty, throw an error
@@ -231,73 +226,79 @@ public:
     // Compute the sampling step for each classes, depending on the chosen strategy
     switch (this->GetParameterInt("strategy"))
     {
-      // constant
-      case 0: {
-        // Set the target number of samples in each class
-        target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
-
-        // re adjust the number of samples to select in each class
-        if (min_elem_in_class < target_number_of_samples[0])
-        {
-          otbAppLogWARNING("Smallest class has " << min_elem_in_class << " samples but a number of "
-                                                 << target_number_of_samples[0] << " is given. Using "
-                                                 << min_elem_in_class);
-          target_number_of_samples.fill(min_elem_in_class);
-        }
+    // constant
+    case 0:
+    {
+      // Set the target number of samples in each class
+      target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
 
-        // Compute the sampling step
-        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
-          step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
+      // re adjust the number of samples to select in each class
+      if (min_elem_in_class < target_number_of_samples[0])
+      {
+        otbAppLogWARNING("Smallest class has " << min_elem_in_class <<
+            " samples but a number of " << target_number_of_samples[0] <<
+            " is given. Using " << min_elem_in_class);
+        target_number_of_samples.fill( min_elem_in_class );
       }
-      break;
 
-      // total
-      case 1: {
-        // Compute the sampling step
-        IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
-        if (step == 0)
-        {
-          otbAppLogWARNING("The number of samples available is smaller than the required number of samples. "
-                           << "Setting sampling step to 1.");
-          step = 1;
-        }
-        step_for_class.fill(step);
+      // Compute the sampling step
+      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
+        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
+    }
+    break;
 
-        // Compute the target number of samples
-        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
-          target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
+    // total
+    case 1:
+    {
+      // Compute the sampling step
+      IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
+      if (step == 0)
+      {
+        otbAppLogWARNING("The number of samples available is smaller than the required number of samples. " <<
+            "Setting sampling step to 1.");
+        step = 1;
       }
-      break;
+      step_for_class.fill(step);
 
-      // smallest
-      case 2: {
-        // Set the target number of samples to the smallest class
-        target_number_of_samples.fill(min_elem_in_class);
+      // Compute the target number of samples
+      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
+        target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
 
-        // Compute the sampling step
-        for (LabelImageType::InternalPixelType classIdx = 0; classIdx < number_of_classes; classIdx++)
-          step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
-      }
-      break;
+    }
+    break;
 
-      // All
-      case 3: {
-        // Easy
-        step_for_class.fill(1);
-        target_number_of_samples = number_of_samples;
-      }
+    // smallest
+    case 2:
+    {
+      // Set the target number of samples to the smallest class
+      target_number_of_samples.fill( min_elem_in_class );
+
+      // Compute the sampling step
+      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
+        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
+
+    }
+    break;
+
+    // All
+    case 3:
+    {
+      // Easy
+      step_for_class.fill(1);
+      target_number_of_samples = number_of_samples;
+    }
+    break;
+    default:
+      otbAppLogFATAL("Strategy mode unknown :"<<this->GetParameterString("strategy"));
       break;
-      default:
-        otbAppLogFATAL("Strategy mode unknown :" << this->GetParameterString("strategy"));
-        break;
     }
 
     // Print quick summary
     otbAppLogINFO("Sampling summary:");
     otbAppLogINFO("\tClass\tStep\tTot");
-    for (LabelImageType::InternalPixelType i = 0; i < number_of_classes; i++)
+    for (LabelImageType::InternalPixelType i = 0 ; i < number_of_classes ; i++)
     {
-      vnl_vector<int> tmp(3, 0);
+      vnl_vector<int> tmp (3,0);
       tmp[0] = i + class_begin;
       tmp[1] = step_for_class[i];
       tmp[2] = target_number_of_samples[i];
@@ -308,8 +309,8 @@ public:
     // TODO: how to pre-allocate the datatree?
     m_OutVectorData = VectorDataType::New();
     DataTreeType::Pointer tree = m_OutVectorData->GetDataTree();
-    DataNodePointer       root = tree->GetRoot()->Get();
-    DataNodePointer       document = DataNodeType::New();
+    DataNodePointer root = tree->GetRoot()->Get();
+    DataNodePointer document = DataNodeType::New();
     document->SetNodeType(DOCUMENT);
     tree->Add(document, root);
 
@@ -321,15 +322,15 @@ public:
     // Second iteration, to prepare the samples
     vnl_vector<IndexValueType> sampledCount(number_of_classes, 0);
     vnl_vector<IndexValueType> iteratorCount(number_of_classes, 0);
-    IndexValueType             n_tot = 0;
-    const IndexValueType       target_n_tot = target_number_of_samples.sum();
+    IndexValueType n_tot = 0;
+    const IndexValueType target_n_tot = target_number_of_samples.sum();
     for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
     {
       LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt(inputImage, streamRegion);
+      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
 
-      for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
+      for (inIt.GoToBegin() ; !inIt.IsAtEnd() ; ++inIt)
       {
         LabelImageType::InternalPixelType classVal = inIt.Get();
 
@@ -341,7 +342,7 @@ public:
           iteratorCount[classVal]++;
 
           // Every Xi samples (Xi is the step for class i)
-          if (iteratorCount[classVal] % ((int)step_for_class[classVal]) == 0 &&
+          if (iteratorCount[classVal] % ((int) step_for_class[classVal]) == 0 &&
               sampledCount[classVal] < target_number_of_samples[classVal])
           {
             // Add this sample
@@ -365,14 +366,15 @@ public:
           } // sample this one
         }
       } // next pixel
-    }   // next streaming region
+    } // next streaming region
     ShowProgressDone();
 
-    otbAppLogINFO("Number of samples in each class: " << sampledCount);
+    otbAppLogINFO( "Number of samples in each class: " << sampledCount );
 
-    otbAppLogINFO("Writing output vector data");
+    otbAppLogINFO( "Writing output vector data");
 
     SetParameterOutputVectorData("outvec", m_OutVectorData);
+
   }
 
 private:
@@ -380,7 +382,7 @@ private:
 
 }; // end of class
 
-} // namespace Wrapper
+} // end namespace Wrapper
 } // end namespace otb
 
 OTB_APPLICATION_EXPORT(otb::Wrapper::LabelImageSampleSelection)
diff --git a/app/otbPatchesExtraction.cxx b/app/otbPatchesExtraction.cxx
index 0eed96df..33eb603c 100644
--- a/app/otbPatchesExtraction.cxx
+++ b/app/otbPatchesExtraction.cxx
@@ -33,10 +33,10 @@ class PatchesExtraction : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef PatchesExtraction             Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
+  typedef PatchesExtraction                   Self;
+  typedef Application                         Superclass;
+  typedef itk::SmartPointer<Self>             Pointer;
+  typedef itk::SmartPointer<const Self>       ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -46,21 +46,21 @@ public:
   typedef otb::TensorflowSampler<FloatVectorImageType, VectorDataType> SamplerType;
 
   /** Typedefs for image concatenation */
-  typedef TensorflowSource<FloatVectorImageType> TFSourceType;
+  typedef TensorflowSource<FloatVectorImageType>                       TFSourceType;
 
   //
   // Store stuff related to one source
   //
   struct SourceBundle
   {
-    TFSourceType                   m_ImageSource; // Image source
-    FloatVectorImageType::SizeType m_PatchSize;   // Patch size
+    TFSourceType                       m_ImageSource;   // Image source
+    FloatVectorImageType::SizeType     m_PatchSize;          // Patch size
 
-    std::string m_KeyIn;     // Key of input image list
-    std::string m_KeyOut;    // Key of output samples image
-    std::string m_KeyPszX;   // Key for samples sizes X
-    std::string m_KeyPszY;   // Key for samples sizes Y
-    std::string m_KeyNoData; // Key for no-data value
+    std::string                        m_KeyIn;   // Key of input image list
+    std::string                        m_KeyOut;  // Key of output samples image
+    std::string                        m_KeyPszX; // Key for samples sizes X
+    std::string                        m_KeyPszY; // Key for samples sizes Y
+    std::string                        m_KeyNoData; // Key for no-data value
 
     FloatVectorImageType::InternalPixelType m_NoDataValue; // No data value
   };
@@ -72,14 +72,12 @@ public:
   // -an output image (samples)
   // -an input patchsize (dimensions of samples)
   //
-  void
-  AddAnInputImage()
+  void AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-<<<<<<< HEAD
     std::stringstream ss_group_key, ss_desc_group, ss_key_in, ss_key_out, ss_desc_in,
     ss_desc_out, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y, ss_key_nodata, ss_desc_nodata;
     ss_group_key   << "source"                    << inputNumber;
@@ -105,52 +103,25 @@ public:
     SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
     AddParameter(ParameterType_Float,          ss_key_nodata.str(), ss_desc_nodata.str());
     MandatoryOff                              (ss_key_nodata.str());
-=======
-    std::stringstream ss_group_key, ss_desc_group, ss_key_in, ss_key_out, ss_desc_in, ss_desc_out, ss_key_dims_x,
-      ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y, ss_key_nodata, ss_desc_nodata;
-    ss_group_key << "source" << inputNumber;
-    ss_desc_group << "Parameters for source " << inputNumber;
-    ss_key_out << ss_group_key.str() << ".out";
-    ss_desc_out << "Output patches for image " << inputNumber;
-    ss_key_in << ss_group_key.str() << ".il";
-    ss_desc_in << "Input image(s) " << inputNumber;
-    ss_key_dims_x << ss_group_key.str() << ".patchsizex";
-    ss_desc_dims_x << "X patch size for image " << inputNumber;
-    ss_key_dims_y << ss_group_key.str() << ".patchsizey";
-    ss_desc_dims_y << "Y patch size for image " << inputNumber;
-    ss_key_nodata << ss_group_key.str() << ".nodata";
-    ss_desc_nodata << "No-data value for image " << inputNumber << "(used only if \"usenodata\" is on)";
-
-    // Populate group
-    AddParameter(ParameterType_Group, ss_group_key.str(), ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(), ss_desc_in.str());
-    AddParameter(ParameterType_OutputImage, ss_key_out.str(), ss_desc_out.str());
-    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_Float, ss_key_nodata.str(), ss_desc_nodata.str());
-    SetDefaultParameterFloat(ss_key_nodata.str(), 0);
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
 
     // Add a new bundle
     SourceBundle bundle;
-    bundle.m_KeyIn = ss_key_in.str();
-    bundle.m_KeyOut = ss_key_out.str();
+    bundle.m_KeyIn   = ss_key_in.str();
+    bundle.m_KeyOut  = ss_key_out.str();
     bundle.m_KeyPszX = ss_key_dims_x.str();
     bundle.m_KeyPszY = ss_key_dims_y.str();
     bundle.m_KeyNoData = ss_key_nodata.str();
 
     m_Bundles.push_back(bundle);
+
   }
 
   //
   // Prepare bundles from the number of points
   //
-  void
-  PrepareInputs()
+  void PrepareInputs()
   {
-    for (auto & bundle : m_Bundles)
+    for (auto& bundle: m_Bundles)
     {
       // Create a stack of input images
       FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
@@ -169,31 +140,26 @@ public:
     }
   }
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     // Documentation
     SetName("PatchesExtraction");
     SetDescription("This application extracts patches in multiple input images. Change "
-                   "the " +
-                   tf::ENV_VAR_NAME_NSOURCES +
-                   " environment variable to set the number of "
-                   "sources.");
-    SetDocLongDescription(
-      "The application takes an input vector layer which is a set of "
-      "points, typically the output of the \"SampleSelection\" or the \"LabelImageSampleSelection\" "
-      "application to sample patches in the input images (samples are centered on the points). "
-      "A \"source\" parameters group is composed of (i) an input image list (can be "
-      "one image e.g. high res. image, or multiple e.g. time series), (ii) the size "
-      "of the patches to sample, and (iii) the output images of patches which will "
-      "be generated at the end of the process. The example below show how to "
-      "set the samples sizes. For a SPOT6 image for instance, the patch size can "
-      "be 64x64 and for an input Sentinel-2 time series the patch size could be "
-      "1x1. Note that if a dimension size is not defined, the largest one will "
-      "be used (i.e. input image dimensions. The number of input sources can be changed "
-      "at runtime by setting the system environment variable " +
-      tf::ENV_VAR_NAME_NSOURCES);
+        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
+        "sources.");
+    SetDocLongDescription("The application takes an input vector layer which is a set of "
+        "points, typically the output of the \"SampleSelection\" or the \"LabelImageSampleSelection\" "
+        "application to sample patches in the input images (samples are centered on the points). "
+        "A \"source\" parameters group is composed of (i) an input image list (can be "
+        "one image e.g. high res. image, or multiple e.g. time series), (ii) the size "
+        "of the patches to sample, and (iii) the output images of patches which will "
+        "be generated at the end of the process. The example below shows how to "
+        "set the samples sizes. For a SPOT6 image for instance, the patch size can "
+        "be 64x64 and for an input Sentinel-2 time series the patch size could be "
+        "1x1. Note that if a dimension size is not defined, the largest one will "
+        "be used (i.e. input image dimensions). The number of input sources can be changed "
+        "at runtime by setting the system environment variable " + tf::ENV_VAR_NAME_NSOURCES);
 
     SetDocAuthors("Remi Cresson");
 
@@ -201,40 +167,32 @@ public:
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources(); i++)
+    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
       AddAnInputImage();
 
     // Input vector data
-    AddParameter(
-      ParameterType_InputVectorData, "vec", "Positions of the samples (must be in the same projection as input image)");
+    AddParameter(ParameterType_InputVectorData, "vec", "Positions of the samples (must be in the same projection as input image)");
 
-<<<<<<< HEAD
-=======
-    // No data parameters
-    AddParameter(ParameterType_Bool, "usenodata", "Reject samples that have no-data value");
-    MandatoryOff("usenodata");
-
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     // Output label
     AddParameter(ParameterType_OutputImage, "outlabels", "output labels");
-    SetDefaultOutputPixelType("outlabels", ImagePixelType_uint8);
-    MandatoryOff("outlabels");
+    SetDefaultOutputPixelType              ("outlabels", ImagePixelType_uint8);
+    MandatoryOff                           ("outlabels");
 
     // Class field
     AddParameter(ParameterType_String, "field", "field of class in the vector data");
 
     // Examples values
-    SetDocExampleParameterValue("vec", "points.sqlite");
-    SetDocExampleParameterValue("source1.il", "$s2_list");
+    SetDocExampleParameterValue("vec",                "points.sqlite");
+    SetDocExampleParameterValue("source1.il",         "$s2_list");
     SetDocExampleParameterValue("source1.patchsizex", "16");
     SetDocExampleParameterValue("source1.patchsizey", "16");
-    SetDocExampleParameterValue("field", "class");
-    SetDocExampleParameterValue("source1.out", "outpatches_16x16.tif");
-    SetDocExampleParameterValue("outlabels", "outlabels.tif");
+    SetDocExampleParameterValue("field",              "class");
+    SetDocExampleParameterValue("source1.out",        "outpatches_16x16.tif");
+    SetDocExampleParameterValue("outlabels",          "outlabels.tif");
+
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
 
     PrepareInputs();
@@ -243,17 +201,8 @@ public:
     SamplerType::Pointer sampler = SamplerType::New();
     sampler->SetInputVectorData(GetParameterVectorData("vec"));
     sampler->SetField(GetParameterAsString("field"));
-<<<<<<< HEAD
 
     for (auto& bundle: m_Bundles)
-=======
-    if (GetParameterInt("usenodata") == 1)
-    {
-      otbAppLogINFO("Rejecting samples that have at least one no-data value");
-      sampler->SetRejectPatchesWithNodata(true);
-    }
-    for (auto & bundle : m_Bundles)
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     {
       if (HasValue(bundle.m_KeyNoData)) 
         {
@@ -275,7 +224,6 @@ public:
     otbAppLogINFO("Number of samples rejected : " << sampler->GetNumberOfRejectedSamples());
 
     // Save patches image
-<<<<<<< HEAD
     if (sampler->GetNumberOfAcceptedSamples()>0)
     {
       for (unsigned int i = 0 ; i < m_Bundles.size() ; i++)
@@ -284,9 +232,6 @@ public:
       }
     }
     else
-=======
-    for (unsigned int i = 0; i < m_Bundles.size(); i++)
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     {
       otbAppLogFATAL("No patch to sample. Please check that your vector data falls inside your images, and no-data values.");
     }
@@ -297,19 +242,20 @@ public:
     {
       SetParameterOutputImage("outlabels", sampler->GetOutputLabelImage());
     }
-  }
-
 
-  void
-  DoUpdateParameters()
-  {}
+  }
+  
+  
+  void DoUpdateParameters()
+  {
+  }
 
 private:
   std::vector<SourceBundle> m_Bundles;
 
 }; // end of class
 
-} // namespace Wrapper
+} // end namespace Wrapper
 } // end namespace otb
 
 OTB_APPLICATION_EXPORT(otb::Wrapper::PatchesExtraction)
diff --git a/app/otbPatchesSelection.cxx b/app/otbPatchesSelection.cxx
index 1e423770..68d76221 100644
--- a/app/otbPatchesSelection.cxx
+++ b/app/otbPatchesSelection.cxx
@@ -36,17 +36,16 @@
 #include <limits>
 
 // Functor to retrieve nodata
-template <class TPixel, class OutputPixel>
+template<class TPixel, class OutputPixel>
 class IsNoData
 {
 public:
-  IsNoData() {}
-  ~IsNoData() {}
+  IsNoData(){}
+  ~IsNoData(){}
 
-  inline OutputPixel
-  operator()(const TPixel & A) const
+  inline OutputPixel operator()( const TPixel & A ) const
   {
-    for (unsigned int band = 0; band < A.Size(); band++)
+    for (unsigned int band = 0 ; band < A.Size() ; band++)
     {
       if (A[band] != m_NoDataValue)
         return 1;
@@ -54,8 +53,7 @@ public:
     return 0;
   }
 
-  void
-  SetNoDataValue(typename TPixel::ValueType value)
+  void SetNoDataValue(typename TPixel::ValueType value)
   {
     m_NoDataValue = value;
   }
@@ -74,48 +72,47 @@ class PatchesSelection : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef PatchesSelection              Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
+  typedef PatchesSelection                    Self;
+  typedef Application                         Superclass;
+  typedef itk::SmartPointer<Self>             Pointer;
+  typedef itk::SmartPointer<const Self>       ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(PatchesSelection, Application);
 
   /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType            DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType> TreeIteratorType;
-  typedef VectorDataType::DataNodeType            DataNodeType;
-  typedef DataNodeType::Pointer                   DataNodePointer;
-  typedef DataNodeType::PointType                 DataNodePointType;
+  typedef VectorDataType::DataTreeType                 DataTreeType;
+  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
+  typedef VectorDataType::DataNodeType                 DataNodeType;
+  typedef DataNodeType::Pointer                        DataNodePointer;
+  typedef DataNodeType::PointType                      DataNodePointType;
 
   /** typedefs */
-  typedef IsNoData<FloatVectorImageType::PixelType, UInt8ImageType::PixelType>                    IsNoDataFunctorType;
+  typedef IsNoData<FloatVectorImageType::PixelType, UInt8ImageType::PixelType > IsNoDataFunctorType;
   typedef itk::UnaryFunctorImageFilter<FloatVectorImageType, UInt8ImageType, IsNoDataFunctorType> IsNoDataFilterType;
 
-  typedef itk::FlatStructuringElement<2> StructuringType;
-  typedef StructuringType::RadiusType    RadiusType;
+  typedef itk::FlatStructuringElement<2>                                         StructuringType;
+  typedef StructuringType::RadiusType                                            RadiusType;
 
   typedef itk::BinaryErodeImageFilter<UInt8ImageType, UInt8ImageType, StructuringType> MorphoFilterType;
 
-  typedef otb::StreamingResampleImageFilter<UInt8ImageType, UInt8ImageType> PadFilterType;
-  typedef itk::NearestNeighborInterpolateImageFunction<UInt8ImageType>      NNInterpolatorType;
+  typedef otb::StreamingResampleImageFilter<UInt8ImageType,UInt8ImageType> PadFilterType;
+  typedef itk::NearestNeighborInterpolateImageFunction<UInt8ImageType> NNInterpolatorType;
 
   typedef tf::Distribution<UInt8ImageType> DistributionType;
 
   typedef itk::MaskImageFilter<UInt8ImageType, UInt8ImageType, UInt8ImageType> MaskImageFilterType;
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     // Documentation
     SetName("PatchesSelection");
     SetDescription("This application generate points sampled at regular interval over "
-                   "the input image region. The grid size and spacing can be configured.");
+        "the input image region. The grid size and spacing can be configured.");
     SetDocLongDescription("This application produces a vector data containing "
-                          "a set of points centered on the patches lying in the valid regions of the input image. ");
+        "a set of points centered on the patches lying in the valid regions of the input image. ");
 
     SetDocAuthors("Remi Cresson");
 
@@ -126,25 +123,18 @@ public:
 
     // Input no-data value
     AddParameter(ParameterType_Float, "nodata", "nodata value");
-<<<<<<< HEAD
     MandatoryOff                     ("nodata");
-=======
-    MandatoryOn("nodata");
-    SetDefaultParameterFloat("nodata", 0);
-    AddParameter(ParameterType_Bool, "nocheck", "If on, no check on the validity of patches is performed");
-    MandatoryOff("nocheck");
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
 
     // Grid
     AddParameter(ParameterType_Group, "grid", "grid settings");
     AddParameter(ParameterType_Int, "grid.step", "step between patches");
-    SetMinimumParameterIntValue("grid.step", 1);
+    SetMinimumParameterIntValue    ("grid.step", 1);
     AddParameter(ParameterType_Int, "grid.psize", "patches size");
-    SetMinimumParameterIntValue("grid.psize", 1);
+    SetMinimumParameterIntValue    ("grid.psize", 1);
     AddParameter(ParameterType_Int, "grid.offsetx", "offset of the grid (x axis)");
-    SetDefaultParameterInt("grid.offsetx", 0);
+    SetDefaultParameterInt         ("grid.offsetx", 0);
     AddParameter(ParameterType_Int, "grid.offsety", "offset of the grid (y axis)");
-    SetDefaultParameterInt("grid.offsety", 0);
+    SetDefaultParameterInt         ("grid.offsety", 0);
 
     // Strategy
     AddParameter(ParameterType_Choice, "strategy", "Selection strategy for validation/training patches");
@@ -170,18 +160,16 @@ public:
     AddChoice("strategy.all", "All locations. Only the \"outtrain\" output parameter is used.");
     // Balanced (experimental)
     AddChoice("strategy.balanced", "you can chose the degree of spatial randomness vs class balance");
-    AddParameter(ParameterType_Float,
-                 "strategy.balanced.sp",
-                 "Spatial proportion: between 0 and 1, "
-                 "indicating the amount of randomly sampled data in space");
-    SetMinimumParameterFloatValue("strategy.balanced.sp", 0);
-    SetMaximumParameterFloatValue("strategy.balanced.sp", 1);
-    SetDefaultParameterFloat("strategy.balanced.sp", 0.25);
-    AddParameter(ParameterType_Int, "strategy.balanced.nclasses", "Number of classes");
-    SetMinimumParameterIntValue("strategy.balanced.nclasses", 2);
-    MandatoryOn("strategy.balanced.nclasses");
+    AddParameter(ParameterType_Float, "strategy.balanced.sp", "Spatial proportion: between 0 and 1, "
+        "indicating the amount of randomly sampled data in space");
+    SetMinimumParameterFloatValue    ("strategy.balanced.sp", 0);
+    SetMaximumParameterFloatValue    ("strategy.balanced.sp", 1);
+    SetDefaultParameterFloat         ("strategy.balanced.sp", 0.25);
+    AddParameter(ParameterType_Int,   "strategy.balanced.nclasses", "Number of classes");
+    SetMinimumParameterIntValue      ("strategy.balanced.nclasses", 2);
+    MandatoryOn                      ("strategy.balanced.nclasses");
     AddParameter(ParameterType_InputImage, "strategy.balanced.labelimage", "input label image");
-    MandatoryOn("strategy.balanced.labelimage");
+    MandatoryOn                           ("strategy.balanced.labelimage");
 
     // Output points
     AddParameter(ParameterType_OutputVectorData, "outtrain", "output set of points (training)");
@@ -191,12 +179,12 @@ public:
     MandatoryOff("outtest");
 
     AddRAMParameter();
+
   }
 
   class SampleBundle
   {
   public:
-<<<<<<< HEAD
     SampleBundle(){}
     explicit SampleBundle(unsigned int nClasses): dist(DistributionType(nClasses)), id(0), group(true){
       (void) point;
@@ -206,117 +194,72 @@ public:
 
     SampleBundle(const SampleBundle & other): dist(other.GetDistribution()), id(other.GetSampleID()),
       point(other.GetPosition()), group(other.GetGroup()), index(other.GetIndex())
-=======
-    SampleBundle() {}
-    explicit SampleBundle(unsigned int nClasses)
-      : dist(DistributionType(nClasses))
-      , id(0)
-      , black(true)
-    {
-      (void)point;
-      (void)index;
-    }
-    ~SampleBundle() {}
-
-    SampleBundle(const SampleBundle & other)
-      : dist(other.GetDistribution())
-      , id(other.GetSampleID())
-      , point(other.GetPosition())
-      , black(other.GetBlack())
-      , index(other.GetIndex())
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     {}
 
-    DistributionType
-    GetDistribution() const
+    DistributionType GetDistribution() const
     {
       return dist;
     }
 
-    DistributionType &
-    GetModifiableDistribution()
+    DistributionType& GetModifiableDistribution()
     {
       return dist;
     }
 
-    unsigned int
-    GetSampleID() const
+    unsigned int GetSampleID() const
     {
       return id;
     }
 
-    unsigned int &
-    GetModifiableSampleID()
+    unsigned int& GetModifiableSampleID()
     {
       return id;
     }
 
-    DataNodePointType
-    GetPosition() const
+    DataNodePointType GetPosition() const
     {
       return point;
     }
 
-    DataNodePointType &
-    GetModifiablePosition()
+    DataNodePointType& GetModifiablePosition()
     {
       return point;
     }
 
-<<<<<<< HEAD
     int& GetModifiableGroup()
-=======
-    bool &
-    GetModifiableBlack()
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     {
       return group;
     }
 
-<<<<<<< HEAD
     int GetGroup() const
-=======
-    bool
-    GetBlack() const
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     {
       return group;
     }
 
-    UInt8ImageType::IndexType &
-    GetModifiableIndex()
+    UInt8ImageType::IndexType& GetModifiableIndex()
     {
       return index;
     }
 
-    UInt8ImageType::IndexType
-    GetIndex() const
+    UInt8ImageType::IndexType GetIndex() const
     {
       return index;
     }
 
   private:
-<<<<<<< HEAD
 
     DistributionType dist;
     unsigned int id;
     DataNodePointType point;
     int group;
-=======
-    DistributionType          dist;
-    unsigned int              id;
-    DataNodePointType         point;
-    bool                      black;
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     UInt8ImageType::IndexType index;
   };
 
   /*
    * Apply the given function at each sampling location, checking if the patch is valid or not
    */
-  template <typename TLambda>
-  void
-  Apply(TLambda lambda)
+  template<typename TLambda>
+  void Apply(TLambda lambda)
   {
 
     int userOffX = GetParameterInt("grid.offsetx");
@@ -328,56 +271,49 @@ public:
 
     // Explicit streaming over the morphed mask, based on the RAM parameter
     typedef otb::RAMDrivenStrippedStreamingManager<UInt8ImageType> StreamingManagerType;
-    StreamingManagerType::Pointer                                  m_StreamingManager = StreamingManagerType::New();
+    StreamingManagerType::Pointer m_StreamingManager = StreamingManagerType::New();
     m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
 
     UInt8ImageType::Pointer inputImage;
-<<<<<<< HEAD
     bool readInput = true;
     if (!HasValue("nodata"))
       {
       otbAppLogINFO("No value specified for no-data. Input image pixels no-data values will not be checked.");
-=======
-    bool                    readInput = true;
-    if (GetParameterInt("nocheck") == 1)
-    {
-      otbAppLogINFO("\"nocheck\" mode is enabled. Input image pixels no-data values will not be checked.");
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
       if (HasValue("mask"))
-      {
+        {
         otbAppLogINFO("Using the provided \"mask\" parameter.");
         inputImage = GetParameterUInt8Image("mask");
-      }
+        }
       else
-      {
+        {
         // This is just a hack to not trigger the whole morpho/pad pipeline
         inputImage = m_NoDataFilter->GetOutput();
         readInput = false;
+        }
       }
-    }
     else
-    {
+      {
       inputImage = m_MorphoFilter->GetOutput();
 
       // Offset update because the morpho filter pads the input image with 1 pixel border
       userOffX += 1;
       userOffY += 1;
-    }
+      }
     UInt8ImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
     entireRegion.ShrinkByRadius(m_Radius);
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion);
+    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
     UInt8ImageType::IndexType start;
     start[0] = m_Radius[0] + 1;
     start[1] = m_Radius[1] + 1;
 
-    int                            m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
-    UInt8ImageType::IndexType      pos;
+    int m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
+    UInt8ImageType::IndexType pos;
     UInt8ImageType::IndexValueType step = GetParameterInt("grid.step");
     pos.Fill(0);
 
     // Offset update
-    userOffX %= step;
-    userOffY %= step;
+    userOffX %= step ;
+    userOffY %= step ;
 
     for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
     {
@@ -385,7 +321,7 @@ public:
 
       UInt8ImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
       tf::PropagateRequestedRegion<UInt8ImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<UInt8ImageType> inIt(inputImage, streamRegion);
+      itk::ImageRegionConstIterator<UInt8ImageType> inIt (inputImage, streamRegion);
 
       for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
       {
@@ -421,6 +357,7 @@ public:
           }
         }
       }
+
     }
   }
 
@@ -432,24 +369,21 @@ public:
   {
     // Nb of samples (maximum)
     const UInt8ImageType::RegionType entireRegion = m_NoDataFilter->GetOutput()->GetLargestPossibleRegion();
-    const unsigned int maxNbOfCols = std::ceil(entireRegion.GetSize(0) / GetParameterInt("grid.step")) + 1;
-    const unsigned int maxNbOfRows = std::ceil(entireRegion.GetSize(1) / GetParameterInt("grid.step")) + 1;
-    unsigned int       maxNbOfSamples = 1;
+    const unsigned int maxNbOfCols = std::ceil(entireRegion.GetSize(0)/GetParameterInt("grid.step")) + 1;
+    const unsigned int maxNbOfRows = std::ceil(entireRegion.GetSize(1)/GetParameterInt("grid.step")) + 1;
+    unsigned int maxNbOfSamples = 1;
     maxNbOfSamples *= maxNbOfCols;
     maxNbOfSamples *= maxNbOfRows;
 
     // Nb of classes
-    SampleBundle              initSB(nbOfClasses);
+    SampleBundle initSB(nbOfClasses);
     std::vector<SampleBundle> bundles(maxNbOfSamples, initSB);
 
     return bundles;
   }
 
-  void
-  SetBlackOrWhiteBundle(SampleBundle &                    bundle,
-                        unsigned int &                    count,
-                        const UInt8ImageType::IndexType & pos,
-                        const UInt8ImageType::PointType & geo)
+  void SetBlackOrWhiteBundle(SampleBundle & bundle, unsigned int & count,
+      const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo)
   {
     // Black or white
     int black = (pos[0] + pos[1]) % 2;
@@ -459,21 +393,21 @@ public:
     bundle.GetModifiableGroup() = black;
     bundle.GetModifiableIndex() = pos;
     count++;
+
   }
 
   /*
    * Samples are placed at regular intervals with the same layout as a chessboard,
    * in two groups (A: black, B: white)
    */
-  void
-  SampleChessboard()
+  void SampleChessboard()
   {
 
     std::vector<SampleBundle> bundles = AllocateSamples();
 
     unsigned int count = 0;
-    auto         lambda = [this, &count, &bundles](const UInt8ImageType::IndexType & pos,
-                                           const UInt8ImageType::PointType & geo) {
+    auto lambda = [this, &count, &bundles]
+                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
       SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
     };
 
@@ -484,7 +418,6 @@ public:
     PopulateVectorData(bundles);
   }
 
-<<<<<<< HEAD
   void SetSplitBundle(SampleBundle & bundle, unsigned int & count,
       const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo,
       const std::vector<int> & groups)
@@ -555,10 +488,6 @@ public:
   }
 
   void SampleBalanced()
-=======
-  void
-  SampleBalanced()
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
   {
 
     // 1. Compute distribution of all samples
@@ -571,13 +500,12 @@ public:
     UInt8ImageType::SizeType patchSize;
     patchSize.Fill(GetParameterInt("grid.psize"));
     unsigned int count = 0;
-    auto         lambda = [this, &bundles, &patchSize, &count](const UInt8ImageType::IndexType & pos,
-                                                       const UInt8ImageType::PointType & geo) {
+    auto lambda = [this, &bundles, &patchSize, &count]
+                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
+
       // Update this sample distribution
       if (tf::UpdateDistributionFromPatch<UInt8ImageType>(GetParameterUInt8Image("strategy.balanced.labelimage"),
-                                                          geo,
-                                                          patchSize,
-                                                          bundles[count].GetModifiableDistribution()))
+          geo, patchSize, bundles[count].GetModifiableDistribution()))
       {
         SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
       }
@@ -586,7 +514,7 @@ public:
     Apply(lambda);
     bundles.resize(count);
 
-    otbAppLogINFO("Total number of candidates: " << count);
+    otbAppLogINFO("Total number of candidates: " << count );
 
     // 2. Seed = spatially random samples
 
@@ -596,13 +524,13 @@ public:
 
     otbAppLogINFO("Spatial sampling step " << samplingStep);
 
-    float                     step = 0;
+    float step = 0;
     std::vector<SampleBundle> seed(count);
     std::vector<SampleBundle> candidates(count);
 
     unsigned int seedCount = 0;
     unsigned int candidatesCount = 0;
-    for (auto & d : bundles)
+    for (auto& d: bundles)
     {
       if (d.GetIndex()[0] % samplingStep + d.GetIndex()[1] % samplingStep == 0)
       {
@@ -628,19 +556,18 @@ public:
 
     float removalRate = static_cast<float>(seedCount) / static_cast<float>(nbToRemove);
     float removalStep = 0;
-    auto  removeSamples = [&removalStep, &removalRate](SampleBundle & b) -> bool {
-      (void)b;
+    auto removeSamples = [&removalStep, &removalRate](SampleBundle & b) -> bool {
+      (void) b;
       bool ret = false;
       if (removalStep >= removalRate)
-      {
+        {
         removalStep = fmod(removalStep, removalRate);
         ret = true;
-      }
+        }
       else
         ret = false;
       removalStep++;
-      return ret;
-      ;
+      return ret;
     };
     auto iterator = std::remove_if(seed.begin(), seed.end(), removeSamples);
     seed.erase(iterator, seed.end());
@@ -650,8 +577,8 @@ public:
     // 3. Compute seed distribution
 
     const unsigned int nbOfClasses = GetParameterInt("strategy.balanced.nclasses");
-    DistributionType   seedDist(nbOfClasses);
-    for (auto & d : seed)
+    DistributionType seedDist(nbOfClasses);
+    for (auto& d: seed)
       seedDist.Update(d.GetDistribution());
 
     otbAppLogINFO("Spatial seed distribution: " << seedDist.ToString());
@@ -661,16 +588,16 @@ public:
     otbAppLogINFO("Balance seed candidates size: " << candidates.size());
 
     // Sort by cos
-    auto comparator = [&seedDist](const SampleBundle & a, const SampleBundle & b) -> bool {
+    auto comparator = [&seedDist](const SampleBundle & a, const SampleBundle & b) -> bool{
       return a.GetDistribution().Cosinus(seedDist) > b.GetDistribution().Cosinus(seedDist);
     };
     sort(candidates.begin(), candidates.end(), comparator);
 
     DistributionType idealDist(nbOfClasses, 1.0 / std::sqrt(static_cast<float>(nbOfClasses)));
-    float            minCos = 0;
-    unsigned int     samplesAdded = 0;
-    seed.resize(seed.size() + candidates.size(), SampleBundle(nbOfClasses));
-    while (candidates.size() > 0)
+    float minCos = 0;
+    unsigned int samplesAdded = 0;
+    seed.resize(seed.size()+candidates.size(), SampleBundle(nbOfClasses));
+    while(candidates.size() > 0)
     {
       // Get the less correlated sample
       SampleBundle candidate = candidates.back();
@@ -702,13 +629,11 @@ public:
     PopulateVectorData(seed);
   }
 
-  void
-  PopulateVectorData(const std::vector<SampleBundle> & samples)
+  void PopulateVectorData(const std::vector<SampleBundle> & samples)
   {
     // Get data tree
     DataTreeType::Pointer treeTrain = m_OutVectorDataTrain->GetDataTree();
     DataTreeType::Pointer treeValid = m_OutVectorDataValid->GetDataTree();
-<<<<<<< HEAD
     DataTreeType::Pointer treeTest = m_OutVectorDataTest->GetDataTree();
     DataNodePointer rootTrain = treeTrain->GetRoot()->Get();
     DataNodePointer rootValid = treeValid->GetRoot()->Get();
@@ -716,12 +641,6 @@ public:
     DataNodePointer documentTrain = DataNodeType::New();
     DataNodePointer documentValid = DataNodeType::New();
     DataNodePointer documentTest = DataNodeType::New();
-=======
-    DataNodePointer       rootTrain = treeTrain->GetRoot()->Get();
-    DataNodePointer       rootValid = treeValid->GetRoot()->Get();
-    DataNodePointer       documentTrain = DataNodeType::New();
-    DataNodePointer       documentValid = DataNodeType::New();
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     documentTrain->SetNodeType(DOCUMENT);
     documentValid->SetNodeType(DOCUMENT);
     documentTest->SetNodeType(DOCUMENT);
@@ -730,7 +649,7 @@ public:
     treeTest->Add(documentTest, rootTest);
 
     unsigned int id = 0;
-    for (const auto & sample : samples)
+    for (const auto& sample: samples)
     {
       // Add point to the VectorData tree
       DataNodePointer newDataNode = DataNodeType::New();
@@ -749,20 +668,16 @@ public:
         // Valid
         treeValid->Add(newDataNode, documentValid);
       }
-<<<<<<< HEAD
       else if (sample.GetGroup() == 2)
       {
         // Test
         treeTest->Add(newDataNode, documentTest);
       }
 
-=======
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
     }
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
     otbAppLogINFO("Grid step : " << this->GetParameterInt("grid.step"));
     otbAppLogINFO("Patch size : " << this->GetParameterInt("grid.psize"));
@@ -781,7 +696,7 @@ public:
 
     // If mask available, use it
     if (HasValue("mask"))
-    {
+      {
       if (GetParameterUInt8Image("mask")->GetLargestPossibleRegion().GetSize() !=
           GetParameterFloatVectorImage("in")->GetLargestPossibleRegion().GetSize())
         otbAppLogFATAL("Mask must have the same size as the input image!");
@@ -790,24 +705,24 @@ public:
       m_MaskImageFilter->SetMaskImage(GetParameterUInt8Image("mask"));
       m_MaskImageFilter->UpdateOutputInformation();
       src = m_MaskImageFilter->GetOutput();
-    }
+      }
 
     // Padding 1 pixel
     UInt8ImageType::SizeType size = src->GetLargestPossibleRegion().GetSize();
     size[0] += 2;
     size[1] += 2;
     UInt8ImageType::SpacingType spacing = src->GetSignedSpacing();
-    UInt8ImageType::PointType   origin = src->GetOrigin();
+    UInt8ImageType::PointType origin = src->GetOrigin();
     origin[0] -= spacing[0];
     origin[1] -= spacing[1];
     m_PadFilter = PadFilterType::New();
     NNInterpolatorType::Pointer nnInterpolator = NNInterpolatorType::New();
     m_PadFilter->SetInterpolator(nnInterpolator);
-    m_PadFilter->SetInput(src);
+    m_PadFilter->SetInput( src );
     m_PadFilter->SetOutputOrigin(origin);
     m_PadFilter->SetOutputSpacing(spacing);
     m_PadFilter->SetOutputSize(size);
-    m_PadFilter->SetEdgePaddingValue(0);
+    m_PadFilter->SetEdgePaddingValue( 0 );
     m_PadFilter->UpdateOutputInformation();
 
     // Morpho
@@ -874,10 +789,9 @@ public:
       }
     }
 
-    otbAppLogINFO("Writing output samples positions");
+    otbAppLogINFO( "Writing output samples positions");
 
     SetParameterOutputVectorData("outtrain", m_OutVectorDataTrain);
-<<<<<<< HEAD
     if (HasValue("outvalid") && GetParameterAsString("strategy") != "all")
     {
       SetParameterOutputVectorData("outvalid", m_OutVectorDataValid);
@@ -887,15 +801,12 @@ public:
       SetParameterOutputVectorData("outtest", m_OutVectorDataTest);
     }
 
-=======
-    SetParameterOutputVectorData("outvalid", m_OutVectorDataValid);
->>>>>>> 8ad5e814243888b26b7ac3983f6745d849905f9c
   }
 
 
-  void
-  DoUpdateParameters()
-  {}
+  void DoUpdateParameters()
+  {
+  }
 
 private:
   RadiusType                   m_Radius;
@@ -908,7 +819,7 @@ private:
   MaskImageFilterType::Pointer m_MaskImageFilter;
 }; // end of class
 
-} // namespace Wrapper
+} // end namespace Wrapper
 } // end namespace otb
 
-OTB_APPLICATION_EXPORT(otb::Wrapper::PatchesSelection)
+OTB_APPLICATION_EXPORT( otb::Wrapper::PatchesSelection )
diff --git a/app/otbTensorflowModelServe.cxx b/app/otbTensorflowModelServe.cxx
index b9f74dfc..47a8c957 100644
--- a/app/otbTensorflowModelServe.cxx
+++ b/app/otbTensorflowModelServe.cxx
@@ -42,10 +42,10 @@ class TensorflowModelServe : public Application
 {
 public:
   /** Standard class typedefs. */
-  typedef TensorflowModelServe          Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
+  typedef TensorflowModelServe                       Self;
+  typedef Application                                Superclass;
+  typedef itk::SmartPointer<Self>                    Pointer;
+  typedef itk::SmartPointer<const Self>              ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -53,10 +53,10 @@ public:
 
   /** Typedefs for tensorflow */
   typedef otb::TensorflowMultisourceModelFilter<FloatVectorImageType, FloatVectorImageType> TFModelFilterType;
-  typedef otb::TensorflowSource<FloatVectorImageType>                                       InputImageSource;
+  typedef otb::TensorflowSource<FloatVectorImageType> InputImageSource;
 
   /** Typedef for streaming */
-  typedef otb::ImageRegionSquareTileSplitter<FloatVectorImageType::ImageDimension>  TileSplitterType;
+  typedef otb::ImageRegionSquareTileSplitter<FloatVectorImageType::ImageDimension> TileSplitterType;
   typedef otb::TensorflowStreamerFilter<FloatVectorImageType, FloatVectorImageType> StreamingFilterType;
 
   /** Typedefs for images */
@@ -83,164 +83,152 @@ public:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void
-  AddAnInputImage()
+  void AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-    std::stringstream ss_key_group, ss_desc_group, ss_key_in, ss_desc_in, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y,
-      ss_desc_dims_y, ss_key_ph, ss_desc_ph;
+    std::stringstream ss_key_group, ss_desc_group,
+    ss_key_in, ss_desc_in,
+    ss_key_dims_x, ss_desc_dims_x,
+    ss_key_dims_y, ss_desc_dims_y,
+    ss_key_ph, ss_desc_ph;
 
     // Parameter group key/description
-    ss_key_group << "source" << inputNumber;
+    ss_key_group  << "source"                  << inputNumber;
     ss_desc_group << "Parameters for source #" << inputNumber;
 
     // Parameter group keys
-    ss_key_in << ss_key_group.str() << ".il";
-    ss_key_dims_x << ss_key_group.str() << ".rfieldx";
-    ss_key_dims_y << ss_key_group.str() << ".rfieldy";
-    ss_key_ph << ss_key_group.str() << ".placeholder";
+    ss_key_in      << ss_key_group.str() << ".il";
+    ss_key_dims_x  << ss_key_group.str() << ".rfieldx";
+    ss_key_dims_y  << ss_key_group.str() << ".rfieldy";
+    ss_key_ph      << ss_key_group.str() << ".placeholder";
 
     // Parameter group descriptions
-    ss_desc_in << "Input image (or list to stack) for source #" << inputNumber;
-    ss_desc_dims_x << "Input receptive field (width) for source #" << inputNumber;
+    ss_desc_in     << "Input image (or list to stack) for source #" << inputNumber;
+    ss_desc_dims_x << "Input receptive field (width) for source #"  << inputNumber;
     ss_desc_dims_y << "Input receptive field (height) for source #" << inputNumber;
-    ss_desc_ph << "Name of the input placeholder for source #" << inputNumber;
+    ss_desc_ph     << "Name of the input placeholder for source #"  << inputNumber;
 
     // Populate group
-    AddParameter(ParameterType_Group, ss_key_group.str(), ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(), ss_desc_in.str());
-    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
-    SetDefaultParameterInt(ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
-    SetDefaultParameterInt(ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_String, ss_key_ph.str(), ss_desc_ph.str());
-    MandatoryOff(ss_key_ph.str());
+    AddParameter(ParameterType_Group,          ss_key_group.str(),  ss_desc_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_in.str(),     ss_desc_in.str() );
+    AddParameter(ParameterType_Int,            ss_key_dims_x.str(), ss_desc_dims_x.str());
+    SetMinimumParameterIntValue               (ss_key_dims_x.str(), 1);
+    SetDefaultParameterInt                    (ss_key_dims_x.str(), 1);
+    AddParameter(ParameterType_Int,            ss_key_dims_y.str(), ss_desc_dims_y.str());
+    SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
+    SetDefaultParameterInt                    (ss_key_dims_y.str(), 1);
+    AddParameter(ParameterType_String,         ss_key_ph.str(),     ss_desc_ph.str());
+    MandatoryOff                              (ss_key_ph.str());
 
     // Add a new bundle
     ProcessObjectsBundle bundle;
-    bundle.m_KeyIn = ss_key_in.str();
-    bundle.m_KeyPszX = ss_key_dims_x.str();
-    bundle.m_KeyPszY = ss_key_dims_y.str();
+    bundle.m_KeyIn     = ss_key_in.str();
+    bundle.m_KeyPszX   = ss_key_dims_x.str();
+    bundle.m_KeyPszY   = ss_key_dims_y.str();
     bundle.m_KeyPHName = ss_key_ph.str();
 
     m_Bundles.push_back(bundle);
+
   }
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     // Documentation
     SetName("TensorflowModelServe");
-    SetDescription("Multisource deep learning classifier using TensorFlow. Change the " + tf::ENV_VAR_NAME_NSOURCES +
-                   " environment variable to set the number of sources.");
+    SetDescription("Multisource deep learning classifier using TensorFlow. Change the "
+        + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of sources.");
     SetDocLongDescription("The application run a TensorFlow model over multiple data sources. "
-                          "The number of input sources can be changed at runtime by setting the system "
-                          "environment variable " +
-                          tf::ENV_VAR_NAME_NSOURCES +
-                          ". For each source, you have to "
-                          "set (1) the placeholder name, as named in the TensorFlow model, (2) the receptive "
-                          "field and (3) the image(s) source. The output is a multiband image, stacking all "
-                          "outputs tensors together: you have to specify (1) the names of the output tensors, as "
-                          "named in the TensorFlow model (typically, an operator's output) and (2) the expression "
-                          "field of each output tensor. The output tensors values will be stacked in the same "
-                          "order as they appear in the \"model.output\" parameter (you can use a space separator "
-                          "between names). You might consider to use extended filename to bypass the automatic "
-                          "memory footprint calculator of the otb application engine, and set a good splitting "
-                          "strategy (Square tiles is good for convolutional networks) or use the \"optim\" "
-                          "parameter group to impose your squared tiles sizes");
+        "The number of input sources can be changed at runtime by setting the system "
+        "environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". For each source, you have to "
+        "set (1) the placeholder name, as named in the TensorFlow model, (2) the receptive "
+        "field and (3) the image(s) source. The output is a multiband image, stacking all "
+        "outputs tensors together: you have to specify (1) the names of the output tensors, as "
+        "named in the TensorFlow model (typically, an operator's output) and (2) the expression "
+        "field of each output tensor. The output tensors values will be stacked in the same "
+        "order as they appear in the \"model.output\" parameter (you can use a space separator "
+        "between names). You might consider to use extended filename to bypass the automatic "
+        "memory footprint calculator of the otb application engine, and set a good splitting "
+        "strategy (Square tiles is good for convolutional networks) or use the \"optim\" "
+        "parameter group to impose your squared tiles sizes");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources(); i++)
+    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
       AddAnInputImage();
 
     // Input model
-    AddParameter(ParameterType_Group, "model", "model parameters");
-    AddParameter(ParameterType_Directory, "model.dir", "TensorFlow SavedModel directory");
-    MandatoryOn("model.dir");
-    SetParameterDescription("model.dir",
-                            "The model directory should contains the model Google Protobuf (.pb) and variables");
-
-    AddParameter(ParameterType_StringList,
-                 "model.userplaceholders",
-                 "Additional single-valued placeholders. Supported types: int, float, bool.");
-    MandatoryOff("model.userplaceholders");
-    SetParameterDescription("model.userplaceholders",
-                            "Syntax to use is \"placeholder_1=value_1 ... placeholder_N=value_N\"");
-    AddParameter(ParameterType_Bool, "model.fullyconv", "Fully convolutional");
-    MandatoryOff("model.fullyconv");
-    AddParameter(ParameterType_StringList,
-                 "model.tagsets",
-                 "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is "
-                 "supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff("model.tagsets");
+    AddParameter(ParameterType_Group,         "model",           "model parameters");
+    AddParameter(ParameterType_Directory,     "model.dir",       "TensorFlow SavedModel directory");
+    MandatoryOn                              ("model.dir");
+    SetParameterDescription                  ("model.dir", "The model directory should contains the model Google Protobuf (.pb) and variables");
+
+    AddParameter(ParameterType_StringList,    "model.userplaceholders",    "Additional single-valued placeholders. Supported types: int, float, bool.");
+    MandatoryOff                             ("model.userplaceholders");
+    SetParameterDescription                  ("model.userplaceholders", "Syntax to use is \"placeholder_1=value_1 ... placeholder_N=value_N\"");
+    AddParameter(ParameterType_Bool,          "model.fullyconv", "Fully convolutional");
+    MandatoryOff                             ("model.fullyconv");
+    AddParameter(ParameterType_StringList,    "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
+    MandatoryOff                             ("model.tagsets");
 
     // Output tensors parameters
-    AddParameter(ParameterType_Group, "output", "Output tensors parameters");
-    AddParameter(ParameterType_Float, "output.spcscale", "The output spacing scale, related to the first input");
-    SetDefaultParameterFloat("output.spcscale", 1.0);
-    SetParameterDescription(
-      "output.spcscale",
-      "The output image size/scale and spacing*scale where size and spacing corresponds to the first input");
-    AddParameter(ParameterType_StringList, "output.names", "Names of the output tensors");
-    MandatoryOff("output.names");
+    AddParameter(ParameterType_Group,         "output",          "Output tensors parameters");
+    AddParameter(ParameterType_Float,         "output.spcscale", "The output spacing scale, related to the first input");
+    SetDefaultParameterFloat                 ("output.spcscale", 1.0);
+    SetParameterDescription                  ("output.spcscale", "The output image size/scale and spacing*scale where size and spacing corresponds to the first input");
+    AddParameter(ParameterType_StringList,    "output.names",    "Names of the output tensors");
+    MandatoryOff                            ("output.names");
 
     // Output Field of Expression
-    AddParameter(ParameterType_Int, "output.efieldx", "The output expression field (width)");
-    SetMinimumParameterIntValue("output.efieldx", 1);
-    SetDefaultParameterInt("output.efieldx", 1);
-    MandatoryOn("output.efieldx");
-    AddParameter(ParameterType_Int, "output.efieldy", "The output expression field (height)");
-    SetMinimumParameterIntValue("output.efieldy", 1);
-    SetDefaultParameterInt("output.efieldy", 1);
-    MandatoryOn("output.efieldy");
+    AddParameter(ParameterType_Int,           "output.efieldx", "The output expression field (width)");
+    SetMinimumParameterIntValue              ("output.efieldx", 1);
+    SetDefaultParameterInt                   ("output.efieldx", 1);
+    MandatoryOn                              ("output.efieldx");
+    AddParameter(ParameterType_Int,           "output.efieldy", "The output expression field (height)");
+    SetMinimumParameterIntValue              ("output.efieldy", 1);
+    SetDefaultParameterInt                   ("output.efieldy", 1);
+    MandatoryOn                              ("output.efieldy");
 
     // Fine tuning
-    AddParameter(ParameterType_Group, "optim", "This group of parameters allows optimization of processing time");
-    AddParameter(ParameterType_Bool, "optim.disabletiling", "Disable tiling");
-    MandatoryOff("optim.disabletiling");
-    SetParameterDescription(
-      "optim.disabletiling",
-      "Tiling avoids to process a too large subset of image, but sometimes it can be useful to disable it");
-    AddParameter(ParameterType_Int, "optim.tilesizex", "Tile width used to stream the filter output");
-    SetMinimumParameterIntValue("optim.tilesizex", 1);
-    SetDefaultParameterInt("optim.tilesizex", 16);
-    AddParameter(ParameterType_Int, "optim.tilesizey", "Tile height used to stream the filter output");
-    SetMinimumParameterIntValue("optim.tilesizey", 1);
-    SetDefaultParameterInt("optim.tilesizey", 16);
+    AddParameter(ParameterType_Group,         "optim" , "This group of parameters allows optimization of processing time");
+    AddParameter(ParameterType_Bool,          "optim.disabletiling", "Disable tiling");
+    MandatoryOff                             ("optim.disabletiling");
+    SetParameterDescription                  ("optim.disabletiling", "Tiling avoids to process a too large subset of image, but sometimes it can be useful to disable it");
+    AddParameter(ParameterType_Int,           "optim.tilesizex", "Tile width used to stream the filter output");
+    SetMinimumParameterIntValue              ("optim.tilesizex", 1);
+    SetDefaultParameterInt                   ("optim.tilesizex", 16);
+    AddParameter(ParameterType_Int,           "optim.tilesizey", "Tile height used to stream the filter output");
+    SetMinimumParameterIntValue              ("optim.tilesizey", 1);
+    SetDefaultParameterInt                   ("optim.tilesizey", 16);
 
     // Output image
     AddParameter(ParameterType_OutputImage, "out", "output image");
 
     // Example
-    SetDocExampleParameterValue("source1.il", "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder", "x1");
-    SetDocExampleParameterValue("source1.rfieldx", "16");
-    SetDocExampleParameterValue("source1.rfieldy", "16");
-    SetDocExampleParameterValue("model.dir", "/tmp/my_saved_model/");
+    SetDocExampleParameterValue("source1.il",             "spot6pms.tif");
+    SetDocExampleParameterValue("source1.placeholder",    "x1");
+    SetDocExampleParameterValue("source1.rfieldx",        "16");
+    SetDocExampleParameterValue("source1.rfieldy",        "16");
+    SetDocExampleParameterValue("model.dir",              "/tmp/my_saved_model/");
     SetDocExampleParameterValue("model.userplaceholders", "is_training=false dropout=0.0");
-    SetDocExampleParameterValue("output.names", "out_predict1 out_proba1");
-    SetDocExampleParameterValue(
-      "out", "\"classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256\"");
+    SetDocExampleParameterValue("output.names",           "out_predict1 out_proba1");
+    SetDocExampleParameterValue("out",                    "\"classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256\"");
+
   }
 
   //
   // Prepare bundles from the number of points
   //
-  void
-  PrepareInputs()
+  void PrepareInputs()
   {
 
-    for (auto & bundle : m_Bundles)
+    for (auto& bundle: m_Bundles)
     {
       // Setting the image source
       FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
@@ -250,13 +238,12 @@ public:
       bundle.m_PatchSize[1] = GetParameterInt(bundle.m_KeyPszY);
 
       otbAppLogINFO("Source info :");
-      otbAppLogINFO("Receptive field  : " << bundle.m_PatchSize);
+      otbAppLogINFO("Receptive field  : " << bundle.m_PatchSize  );
       otbAppLogINFO("Placeholder name : " << bundle.m_Placeholder);
     }
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
 
     // Load the Tensorflow bundle
@@ -274,8 +261,8 @@ public:
 
     // Get user placeholders
     TFModelFilterType::StringList expressions = GetParameterStringList("model.userplaceholders");
-    TFModelFilterType::DictType   dict;
-    for (auto & exp : expressions)
+    TFModelFilterType::DictType dict;
+    for (auto& exp: expressions)
     {
       TFModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
       dict.push_back(entry);
@@ -285,13 +272,13 @@ public:
     m_TFFilter->SetUserPlaceholders(dict);
 
     // Input sources
-    for (auto & bundle : m_Bundles)
+    for (auto& bundle: m_Bundles)
     {
       m_TFFilter->PushBackInputTensorBundle(bundle.m_Placeholder, bundle.m_PatchSize, bundle.m_ImageSource.Get());
     }
 
     // Fully convolutional mode on/off
-    if (GetParameterInt("model.fullyconv") == 1)
+    if (GetParameterInt("model.fullyconv")==1)
     {
       otbAppLogINFO("The TensorFlow model is used in fully convolutional mode");
       m_TFFilter->SetFullyConvolutional(true);
@@ -301,7 +288,7 @@ public:
     FloatVectorImageType::SizeType foe;
     foe[0] = GetParameterInt("output.efieldx");
     foe[1] = GetParameterInt("output.efieldy");
-    m_TFFilter->SetOutputExpressionFields({ foe });
+    m_TFFilter->SetOutputExpressionFields({foe});
 
     otbAppLogINFO("Output field of expression: " << m_TFFilter->GetOutputExpressionFields()[0]);
 
@@ -314,22 +301,22 @@ public:
       tileSize[1] = GetParameterInt("optim.tilesizey");
 
       // Check that the tile size is aligned to the field of expression
-      for (unsigned int i = 0; i < FloatVectorImageType::ImageDimension; i++)
+      for (unsigned int i = 0 ; i < FloatVectorImageType::ImageDimension ; i++)
         if (tileSize[i] % foe[i] != 0)
-        {
+          {
           SizeType::SizeValueType newSize = 1 + std::floor(tileSize[i] / foe[i]);
           newSize *= foe[i];
 
           otbAppLogWARNING("Aligning the tiling to the output expression field "
-                           << "for better performances (dim " << i << "). New value set to " << newSize)
+              << "for better performances (dim " << i << "). New value set to " << newSize)
 
-            tileSize[i] = newSize;
-        }
+          tileSize[i] = newSize;
+          }
 
       otbAppLogINFO("Force tiling with squared tiles of " << tileSize)
 
-        // Force the computation tile by tile
-        m_StreamFilter = StreamingFilterType::New();
+      // Force the computation tile by tile
+      m_StreamFilter = StreamingFilterType::New();
       m_StreamFilter->SetOutputGridSize(tileSize);
       m_StreamFilter->SetInput(m_TFFilter->GetOutput());
 
@@ -341,13 +328,14 @@ public:
       SetParameterOutputImage("out", m_TFFilter->GetOutput());
     }
   }
+  
 
-
-  void
-  DoUpdateParameters()
-  {}
+  void DoUpdateParameters()
+  {
+  }
 
 private:
+
   TFModelFilterType::Pointer   m_TFFilter;
   StreamingFilterType::Pointer m_StreamFilter;
   tensorflow::SavedModelBundle m_SavedModel; // must be alive during all the execution of the application !
@@ -356,7 +344,7 @@ private:
 
 }; // end of class
 
-} // namespace Wrapper
+} // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT(otb::Wrapper::TensorflowModelServe)
+OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelServe )
diff --git a/app/otbTensorflowModelTrain.cxx b/app/otbTensorflowModelTrain.cxx
index f5a420a9..e7901998 100644
--- a/app/otbTensorflowModelTrain.cxx
+++ b/app/otbTensorflowModelTrain.cxx
@@ -42,11 +42,12 @@ namespace Wrapper
 class TensorflowModelTrain : public Application
 {
 public:
+
   /** Standard class typedefs. */
-  typedef TensorflowModelTrain          Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
+  typedef TensorflowModelTrain                       Self;
+  typedef Application                                Superclass;
+  typedef itk::SmartPointer<Self>                    Pointer;
+  typedef itk::SmartPointer<const Self>              ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
@@ -76,8 +77,8 @@ public:
     std::string m_KeyInForValid;     // Key of input image list (validation)
     std::string m_KeyPHNameForTrain; // Key for placeholder name in the TensorFlow model (training)
     std::string m_KeyPHNameForValid; // Key for placeholder name in the TensorFlow model (validation)
-    std::string m_KeyPszX;           // Key for samples sizes X
-    std::string m_KeyPszY;           // Key for samples sizes Y
+    std::string m_KeyPszX;   // Key for samples sizes X
+    std::string m_KeyPszY;   // Key for samples sizes Y
   };
 
   /** Typedefs for the app */
@@ -85,9 +86,9 @@ public:
   typedef std::vector<FloatVectorImageType::SizeType> SizeList;
   typedef std::vector<std::string>                    StringList;
 
-  void
-  DoUpdateParameters()
-  {}
+  void DoUpdateParameters()
+  {
+  }
 
   //
   // Add an input source, which includes:
@@ -97,161 +98,151 @@ public:
   // -an input image placeholder (for validation)
   // -an input patchsize, which is the dimensions of samples. Same for training and validation.
   //
-  void
-  AddAnInputImage()
+  void AddAnInputImage()
   {
     // Number of source
     unsigned int inputNumber = m_Bundles.size() + 1;
 
     // Create keys and descriptions
-    std::stringstream ss_key_tr_group, ss_desc_tr_group, ss_key_val_group, ss_desc_val_group, ss_key_tr_in,
-      ss_desc_tr_in, ss_key_val_in, ss_desc_val_in, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y,
-      ss_key_tr_ph, ss_desc_tr_ph, ss_key_val_ph, ss_desc_val_ph;
+    std::stringstream ss_key_tr_group, ss_desc_tr_group,
+    ss_key_val_group, ss_desc_val_group,
+    ss_key_tr_in, ss_desc_tr_in,
+    ss_key_val_in, ss_desc_val_in,
+    ss_key_dims_x, ss_desc_dims_x,
+    ss_key_dims_y, ss_desc_dims_y,
+    ss_key_tr_ph, ss_desc_tr_ph,
+    ss_key_val_ph, ss_desc_val_ph;
 
     // Parameter group key/description
-    ss_key_tr_group << "training.source" << inputNumber;
-    ss_key_val_group << "validation.source" << inputNumber;
-    ss_desc_tr_group << "Parameters for source #" << inputNumber << " (training)";
+    ss_key_tr_group   << "training.source"         << inputNumber;
+    ss_key_val_group  << "validation.source"       << inputNumber;
+    ss_desc_tr_group  << "Parameters for source #" << inputNumber << " (training)";
     ss_desc_val_group << "Parameters for source #" << inputNumber << " (validation)";
 
     // Parameter group keys
-    ss_key_tr_in << ss_key_tr_group.str() << ".il";
-    ss_key_val_in << ss_key_val_group.str() << ".il";
-    ss_key_dims_x << ss_key_tr_group.str() << ".patchsizex";
-    ss_key_dims_y << ss_key_tr_group.str() << ".patchsizey";
-    ss_key_tr_ph << ss_key_tr_group.str() << ".placeholder";
-    ss_key_val_ph << ss_key_val_group.str() << ".name";
+    ss_key_tr_in   << ss_key_tr_group.str()  << ".il";
+    ss_key_val_in  << ss_key_val_group.str() << ".il";
+    ss_key_dims_x  << ss_key_tr_group.str()  << ".patchsizex";
+    ss_key_dims_y  << ss_key_tr_group.str()  << ".patchsizey";
+    ss_key_tr_ph   << ss_key_tr_group.str()  << ".placeholder";
+    ss_key_val_ph  << ss_key_val_group.str() << ".name";
 
     // Parameter group descriptions
-    ss_desc_tr_in << "Input image (or list to stack) for source #" << inputNumber << " (training)";
+    ss_desc_tr_in  << "Input image (or list to stack) for source #" << inputNumber << " (training)";
     ss_desc_val_in << "Input image (or list to stack) for source #" << inputNumber << " (validation)";
-    ss_desc_dims_x << "Patch size (x) for source #" << inputNumber;
-    ss_desc_dims_y << "Patch size (y) for source #" << inputNumber;
-    ss_desc_tr_ph << "Name of the input placeholder for source #" << inputNumber << " (training)";
+    ss_desc_dims_x << "Patch size (x) for source #"                 << inputNumber;
+    ss_desc_dims_y << "Patch size (y) for source #"                 << inputNumber;
+    ss_desc_tr_ph  << "Name of the input placeholder for source #"  << inputNumber << " (training)";
     ss_desc_val_ph << "Name of the input placeholder "
-                      "or output tensor for source #"
-                   << inputNumber << " (validation)";
+        "or output tensor for source #"                             << inputNumber << " (validation)";
 
     // Populate group
-    AddParameter(ParameterType_Group, ss_key_tr_group.str(), ss_desc_tr_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_tr_in.str(), ss_desc_tr_in.str());
-    AddParameter(ParameterType_Int, ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue(ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int, ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue(ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_String, ss_key_tr_ph.str(), ss_desc_tr_ph.str());
-    AddParameter(ParameterType_Group, ss_key_val_group.str(), ss_desc_val_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_val_in.str(), ss_desc_val_in.str());
-    AddParameter(ParameterType_String, ss_key_val_ph.str(), ss_desc_val_ph.str());
+    AddParameter(ParameterType_Group,          ss_key_tr_group.str(),  ss_desc_tr_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_tr_in.str(),     ss_desc_tr_in.str() );
+    AddParameter(ParameterType_Int,            ss_key_dims_x.str(),    ss_desc_dims_x.str());
+    SetMinimumParameterIntValue               (ss_key_dims_x.str(),    1);
+    AddParameter(ParameterType_Int,            ss_key_dims_y.str(),    ss_desc_dims_y.str());
+    SetMinimumParameterIntValue               (ss_key_dims_y.str(),    1);
+    AddParameter(ParameterType_String,         ss_key_tr_ph.str(),     ss_desc_tr_ph.str());
+    AddParameter(ParameterType_Group,          ss_key_val_group.str(), ss_desc_val_group.str());
+    AddParameter(ParameterType_InputImageList, ss_key_val_in.str(),    ss_desc_val_in.str() );
+    AddParameter(ParameterType_String,         ss_key_val_ph.str(),    ss_desc_val_ph.str());
 
     // Add a new bundle
     ProcessObjectsBundle bundle;
-    bundle.m_KeyInForTrain = ss_key_tr_in.str();
-    bundle.m_KeyInForValid = ss_key_val_in.str();
+    bundle.m_KeyInForTrain     = ss_key_tr_in.str();
+    bundle.m_KeyInForValid     = ss_key_val_in.str();
     bundle.m_KeyPHNameForTrain = ss_key_tr_ph.str();
     bundle.m_KeyPHNameForValid = ss_key_val_ph.str();
-    bundle.m_KeyPszX = ss_key_dims_x.str();
-    bundle.m_KeyPszY = ss_key_dims_y.str();
+    bundle.m_KeyPszX           = ss_key_dims_x.str();
+    bundle.m_KeyPszY           = ss_key_dims_y.str();
 
     m_Bundles.push_back(bundle);
   }
 
-  void
-  DoInit()
+  void DoInit()
   {
 
     // Documentation
     SetName("TensorflowModelTrain");
     SetDescription("Train a multisource deep learning net using Tensorflow. Change "
-                   "the " +
-                   tf::ENV_VAR_NAME_NSOURCES +
-                   " environment variable to set the number of "
-                   "sources.");
+        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
+        "sources.");
     SetDocLongDescription("The application trains a Tensorflow model over multiple data sources. "
-                          "The number of input sources can be changed at runtime by setting the "
-                          "system environment variable " +
-                          tf::ENV_VAR_NAME_NSOURCES +
-                          ". "
-                          "For each source, you have to set (1) the tensor placeholder name, as named in "
-                          "the tensorflow model, (2) the patch size and (3) the image(s) source. ");
+        "The number of input sources can be changed at runtime by setting the "
+        "system environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". "
+        "For each source, you have to set (1) the tensor placeholder name, as named in "
+        "the tensorflow model, (2) the patch size and (3) the image(s) source. ");
     SetDocAuthors("Remi Cresson");
 
     AddDocTag(Tags::Learning);
 
     // Input model
-    AddParameter(ParameterType_Group, "model", "Model parameters");
-    AddParameter(ParameterType_Directory, "model.dir", "Tensorflow model_save directory");
-    MandatoryOn("model.dir");
-    AddParameter(ParameterType_String, "model.restorefrom", "Restore model from path");
-    MandatoryOff("model.restorefrom");
-    AddParameter(ParameterType_String, "model.saveto", "Save model to path");
-    MandatoryOff("model.saveto");
-    AddParameter(ParameterType_StringList,
-                 "model.tagsets",
-                 "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is "
-                 "supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff("model.tagsets");
+    AddParameter(ParameterType_Group,       "model",              "Model parameters");
+    AddParameter(ParameterType_Directory,   "model.dir",          "Tensorflow model_save directory");
+    MandatoryOn                            ("model.dir");
+    AddParameter(ParameterType_String,      "model.restorefrom",  "Restore model from path");
+    MandatoryOff                           ("model.restorefrom");
+    AddParameter(ParameterType_String,      "model.saveto",       "Save model to path");
+    MandatoryOff                           ("model.saveto");
+    AddParameter(ParameterType_StringList,  "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
+    MandatoryOff                           ("model.tagsets");
 
     // Training parameters group
-    AddParameter(ParameterType_Group, "training", "Training parameters");
-    AddParameter(ParameterType_Int, "training.batchsize", "Batch size");
-    SetMinimumParameterIntValue("training.batchsize", 1);
-    SetDefaultParameterInt("training.batchsize", 100);
-    AddParameter(ParameterType_Int, "training.epochs", "Number of epochs");
-    SetMinimumParameterIntValue("training.epochs", 1);
-    SetDefaultParameterInt("training.epochs", 100);
-    AddParameter(ParameterType_StringList,
-                 "training.userplaceholders",
+    AddParameter(ParameterType_Group,       "training",           "Training parameters");
+    AddParameter(ParameterType_Int,         "training.batchsize", "Batch size");
+    SetMinimumParameterIntValue            ("training.batchsize", 1);
+    SetDefaultParameterInt                 ("training.batchsize", 100);
+    AddParameter(ParameterType_Int,         "training.epochs",    "Number of epochs");
+    SetMinimumParameterIntValue            ("training.epochs",    1);
+    SetDefaultParameterInt                 ("training.epochs",    100);
+    AddParameter(ParameterType_StringList,  "training.userplaceholders",
                  "Additional single-valued placeholders for training. Supported types: int, float, bool.");
-    MandatoryOff("training.userplaceholders");
-    AddParameter(ParameterType_StringList, "training.targetnodes", "Names of the target nodes");
-    MandatoryOn("training.targetnodes");
-    AddParameter(ParameterType_StringList, "training.outputtensors", "Names of the output tensors to display");
-    MandatoryOff("training.outputtensors");
-    AddParameter(ParameterType_Bool,
-                 "training.usestreaming",
-                 "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff("training.usestreaming");
+    MandatoryOff                           ("training.userplaceholders");
+    AddParameter(ParameterType_StringList,  "training.targetnodes",    "Names of the target nodes");
+    MandatoryOn                            ("training.targetnodes");
+    AddParameter(ParameterType_StringList,  "training.outputtensors",  "Names of the output tensors to display");
+    MandatoryOff                           ("training.outputtensors");
+    AddParameter(ParameterType_Bool,        "training.usestreaming",   "Use the streaming through patches (slower but can process big dataset)");
+    MandatoryOff                           ("training.usestreaming");
 
     // Metrics
-    AddParameter(ParameterType_Group, "validation", "Validation parameters");
-    MandatoryOff("validation");
-    AddParameter(ParameterType_Int, "validation.step", "Perform the validation every Nth epochs");
-    SetMinimumParameterIntValue("validation.step", 1);
-    SetDefaultParameterInt("validation.step", 10);
-    AddParameter(ParameterType_Choice, "validation.mode", "Metrics to compute");
-    AddChoice("validation.mode.none", "No validation step");
-    AddChoice("validation.mode.class", "Classification metrics");
-    AddChoice("validation.mode.rmse", "Root mean square error");
-    AddParameter(ParameterType_StringList,
-                 "validation.userplaceholders",
+    AddParameter(ParameterType_Group,       "validation",              "Validation parameters");
+    MandatoryOff                           ("validation");
+    AddParameter(ParameterType_Int,         "validation.step",         "Perform the validation every Nth epochs");
+    SetMinimumParameterIntValue            ("validation.step",         1);
+    SetDefaultParameterInt                 ("validation.step",         10);
+    AddParameter(ParameterType_Choice,      "validation.mode",         "Metrics to compute");
+    AddChoice                              ("validation.mode.none",    "No validation step");
+    AddChoice                              ("validation.mode.class",   "Classification metrics");
+    AddChoice                              ("validation.mode.rmse",    "Root mean square error");
+    AddParameter(ParameterType_StringList,  "validation.userplaceholders",
                  "Additional single-valued placeholders for validation. Supported types: int, float, bool.");
-    MandatoryOff("validation.userplaceholders");
-    AddParameter(ParameterType_Bool,
-                 "validation.usestreaming",
-                 "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff("validation.usestreaming");
+    MandatoryOff                           ("validation.userplaceholders");
+    AddParameter(ParameterType_Bool,        "validation.usestreaming", "Use the streaming through patches (slower but can process big dataset)");
+    MandatoryOff                           ("validation.usestreaming");
 
     // Input/output images
     AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() + 1; i++) // +1 because we have at least 1 source more for training
-    {
+    for (int i = 1; i < tf::GetNumberOfSources() + 1 ; i++) // +1 because we have at least 1 source more for training
+      {
       AddAnInputImage();
-    }
+      }
 
     // Example
-    SetDocExampleParameterValue("source1.il", "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder", "x1");
-    SetDocExampleParameterValue("source1.patchsizex", "16");
-    SetDocExampleParameterValue("source1.patchsizey", "16");
-    SetDocExampleParameterValue("source2.il", "labels.tif");
-    SetDocExampleParameterValue("source2.placeholder", "y1");
-    SetDocExampleParameterValue("source2.patchsizex", "1");
-    SetDocExampleParameterValue("source2.patchsizex", "1");
-    SetDocExampleParameterValue("model.dir", "/tmp/my_saved_model/");
+    SetDocExampleParameterValue("source1.il",                "spot6pms.tif");
+    SetDocExampleParameterValue("source1.placeholder",       "x1");
+    SetDocExampleParameterValue("source1.patchsizex",        "16");
+    SetDocExampleParameterValue("source1.patchsizey",        "16");
+    SetDocExampleParameterValue("source2.il",                "labels.tif");
+    SetDocExampleParameterValue("source2.placeholder",       "y1");
+    SetDocExampleParameterValue("source2.patchsizex",        "1");
+    SetDocExampleParameterValue("source2.patchsizex",        "1");
+    SetDocExampleParameterValue("model.dir",                 "/tmp/my_saved_model/");
     SetDocExampleParameterValue("training.userplaceholders", "is_training=true dropout=0.2");
-    SetDocExampleParameterValue("training.targetnodes", "optimizer");
-    SetDocExampleParameterValue("model.saveto", "/tmp/my_saved_model/variables/variables");
+    SetDocExampleParameterValue("training.targetnodes",      "optimizer");
+    SetDocExampleParameterValue("model.saveto",              "/tmp/my_saved_model/variables/variables");
+
   }
 
   //
@@ -270,8 +261,7 @@ public:
   //       if we can keep trace of indices of sources for
   //       training / test / validation
   //
-  void
-  PrepareInputs()
+  void PrepareInputs()
   {
     // Clear placeholder names
     m_InputPlaceholdersForTraining.clear();
@@ -293,8 +283,8 @@ public:
 
 
     // Prepare the bundles
-    for (auto & bundle : m_Bundles)
-    {
+    for (auto& bundle: m_Bundles)
+      {
       // Source
       FloatVectorImageListType::Pointer trainStack = GetParameterImageList(bundle.m_KeyInForTrain);
       bundle.tfSource.Set(trainStack);
@@ -311,17 +301,17 @@ public:
       m_InputPatchesSizeForTraining.push_back(patchSize);
 
       otbAppLogINFO("New source:");
-      otbAppLogINFO("Patch size               : " << patchSize);
-      otbAppLogINFO("Placeholder (training)   : " << placeholderForTraining);
+      otbAppLogINFO("Patch size               : "<< patchSize);
+      otbAppLogINFO("Placeholder (training)   : "<< placeholderForTraining);
 
       // Prepare validation sources
       if (GetParameterInt("validation.mode") != 0)
-      {
+        {
         // Get the stack
         if (!HasValue(bundle.m_KeyInForValid))
-        {
+          {
           otbAppLogFATAL("No validation input is set for this source");
-        }
+          }
         FloatVectorImageListType::Pointer validStack = GetParameterImageList(bundle.m_KeyInForValid);
         bundle.tfSourceForValidation.Set(validStack);
 
@@ -329,12 +319,12 @@ public:
         // If yes, it means that its not an output tensor on which perform the validation
         std::string placeholderForValidation = GetParameterAsString(bundle.m_KeyPHNameForValid);
         if (placeholderForValidation.empty())
-        {
+          {
           placeholderForValidation = placeholderForTraining;
-        }
+          }
         // Same placeholder name ==> is a source for validation
         if (placeholderForValidation.compare(placeholderForTraining) == 0)
-        {
+          {
           // Source
           m_InputSourcesForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
           m_InputSourcesForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
@@ -345,11 +335,12 @@ public:
           // Patch size
           m_InputPatchesSizeForValidation.push_back(patchSize);
 
-          otbAppLogINFO("Placeholder (validation) : " << placeholderForValidation);
-        }
+          otbAppLogINFO("Placeholder (validation) : "<< placeholderForValidation);
+
+          }
         // Different placeholder ==> is a target to validate
         else
-        {
+          {
           // Source
           m_InputTargetsForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
           m_InputTargetsForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
@@ -360,54 +351,51 @@ public:
           // Patch size
           m_TargetPatchesSize.push_back(patchSize);
 
-          otbAppLogINFO("Tensor name (validation) : " << placeholderForValidation);
+          otbAppLogINFO("Tensor name (validation) : "<< placeholderForValidation);
+          }
+
         }
+
       }
-    }
   }
 
   //
   // Get user placeholders
   //
-  TrainModelFilterType::DictType
-  GetUserPlaceholders(const std::string & key)
+  TrainModelFilterType::DictType GetUserPlaceholders(const std::string & key)
   {
-    TrainModelFilterType::DictType   dict;
+    TrainModelFilterType::DictType dict;
     TrainModelFilterType::StringList expressions = GetParameterStringList(key);
-    for (auto & exp : expressions)
-    {
+    for (auto& exp: expressions)
+      {
       TrainModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
       dict.push_back(entry);
 
       otbAppLogINFO("Using placeholder " << entry.first << " with " << tf::PrintTensorInfos(entry.second));
-    }
+      }
     return dict;
   }
 
   //
   // Print some classification metrics
   //
-  void
-  PrintClassificationMetrics(const ConfMatType & confMat, const MapOfClassesType & mapOfClassesRef)
+  void PrintClassificationMetrics(const ConfMatType & confMat, const MapOfClassesType & mapOfClassesRef)
   {
     ConfusionMatrixCalculatorType::Pointer confMatMeasurements = ConfusionMatrixCalculatorType::New();
     confMatMeasurements->SetConfusionMatrix(confMat);
     confMatMeasurements->SetMapOfClasses(mapOfClassesRef);
     confMatMeasurements->Compute();
 
-    for (auto const & itMapOfClassesRef : mapOfClassesRef)
-    {
+    for (auto const& itMapOfClassesRef : mapOfClassesRef)
+      {
       LabelValueType labelRef = itMapOfClassesRef.first;
       LabelValueType indexLabelRef = itMapOfClassesRef.second;
 
-      otbAppLogINFO("Precision of class [" << labelRef
-                                           << "] vs all: " << confMatMeasurements->GetPrecisions()[indexLabelRef]);
-      otbAppLogINFO("Recall of class [" << labelRef
-                                        << "] vs all: " << confMatMeasurements->GetRecalls()[indexLabelRef]);
-      otbAppLogINFO("F-score of class [" << labelRef
-                                         << "] vs all: " << confMatMeasurements->GetFScores()[indexLabelRef]);
+      otbAppLogINFO("Precision of class [" << labelRef << "] vs all: " << confMatMeasurements->GetPrecisions()[indexLabelRef]);
+      otbAppLogINFO("Recall of class [" << labelRef << "] vs all: " << confMatMeasurements->GetRecalls()[indexLabelRef]);
+      otbAppLogINFO("F-score of class [" << labelRef << "] vs all: " << confMatMeasurements->GetFScores()[indexLabelRef]);
       otbAppLogINFO("\t");
-    }
+      }
     otbAppLogINFO("Precision of the different classes: " << confMatMeasurements->GetPrecisions());
     otbAppLogINFO("Recall of the different classes: " << confMatMeasurements->GetRecalls());
     otbAppLogINFO("F-score of the different classes: " << confMatMeasurements->GetFScores());
@@ -417,8 +405,7 @@ public:
     otbAppLogINFO("Confusion matrix:\n" << confMat);
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
 
     // Load the Tensorflow bundle
@@ -426,13 +413,13 @@ public:
 
     // Check if we have to restore variables from somewhere else
     if (HasValue("model.restorefrom"))
-    {
+      {
       const std::string path = GetParameterAsString("model.restorefrom");
       otbAppLogINFO("Restoring model from " + path);
 
       // Load SavedModel variables
       tf::RestoreModel(path, m_SavedModel);
-    }
+      }
 
     // Prepare inputs
     PrepareInputs();
@@ -447,16 +434,18 @@ public:
     m_TrainModelFilter->SetUseStreaming(GetParameterInt("training.usestreaming"));
 
     // Set inputs
-    for (unsigned int i = 0; i < m_InputSourcesForTraining.size(); i++)
-    {
+    for (unsigned int i = 0 ; i < m_InputSourcesForTraining.size() ; i++)
+      {
       m_TrainModelFilter->PushBackInputTensorBundle(
-        m_InputPlaceholdersForTraining[i], m_InputPatchesSizeForTraining[i], m_InputSourcesForTraining[i]);
-    }
+          m_InputPlaceholdersForTraining[i],
+          m_InputPatchesSizeForTraining[i],
+          m_InputSourcesForTraining[i]);
+      }
 
     // Setup the validation filter
     const bool do_validation = HasUserValue("validation.mode");
-    if (GetParameterInt("validation.mode") == 1) // class
-    {
+    if (GetParameterInt("validation.mode")==1) // class
+      {
       otbAppLogINFO("Set validation mode to classification validation");
 
       m_ValidateModelFilter = ValidateModelFilterType::New();
@@ -467,18 +456,18 @@ public:
       m_ValidateModelFilter->SetInputReceptiveFields(m_InputPatchesSizeForValidation);
       m_ValidateModelFilter->SetOutputTensors(m_TargetTensorsNames);
       m_ValidateModelFilter->SetOutputExpressionFields(m_TargetPatchesSize);
-    }
-    else if (GetParameterInt("validation.mode") == 2) // rmse)
-    {
+      }
+    else if (GetParameterInt("validation.mode")==2) // rmse)
+      {
       otbAppLogINFO("Set validation mode to classification RMSE evaluation");
       otbAppLogFATAL("Not implemented yet !"); // XD
 
       // TODO
-    }
+      }
 
     // Epoch
-    for (int epoch = 1; epoch <= GetParameterInt("training.epochs"); epoch++)
-    {
+    for (int epoch = 1 ; epoch <= GetParameterInt("training.epochs") ; epoch++)
+      {
       // Train the model
       AddProcess(m_TrainModelFilter, "Training epoch #" + std::to_string(epoch));
       m_TrainModelFilter->Update();
@@ -490,7 +479,7 @@ public:
         {
           // 1. Evaluate the metrics against the learning data
 
-          for (unsigned int i = 0; i < m_InputSourcesForEvaluationAgainstLearningData.size(); i++)
+          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstLearningData.size() ; i++)
           {
             m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstLearningData[i]);
           }
@@ -503,17 +492,16 @@ public:
           AddProcess(m_ValidateModelFilter, "Evaluate model (Learning data)");
           m_ValidateModelFilter->Update();
 
-          for (unsigned int i = 0; i < m_TargetTensorsNames.size(); i++)
+          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
           {
             otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i),
-                                       m_ValidateModelFilter->GetMapOfClasses(i));
+            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
           }
 
           // 2. Evaluate the metrics against the validation data
 
           // Here we just change the input sources and references
-          for (unsigned int i = 0; i < m_InputSourcesForEvaluationAgainstValidationData.size(); i++)
+          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstValidationData.size() ; i++)
           {
             m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstValidationData[i]);
           }
@@ -524,28 +512,29 @@ public:
           AddProcess(m_ValidateModelFilter, "Evaluate model (Validation data)");
           m_ValidateModelFilter->Update();
 
-          for (unsigned int i = 0; i < m_TargetTensorsNames.size(); i++)
+          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
           {
             otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i),
-                                       m_ValidateModelFilter->GetMapOfClasses(i));
+            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
           }
         } // Step is OK to perform validation
-      }   // Do the validation against the validation data
+      } // Do the validation against the validation data
 
-    } // Next epoch
+      } // Next epoch
 
     // Check if we have to save variables to somewhere
     if (HasValue("model.saveto"))
-    {
+      {
       const std::string path = GetParameterAsString("model.saveto");
       otbAppLogINFO("Saving model to " + path);
       tf::SaveModel(path, m_SavedModel);
-    }
+      }
+
   }
 
 private:
-  tensorflow::SavedModelBundle m_SavedModel; // must be alive during all the execution of the application !
+
+  tensorflow::SavedModelBundle     m_SavedModel; // must be alive during all the execution of the application !
 
   // Filters
   TrainModelFilterType::Pointer    m_TrainModelFilter;
@@ -555,9 +544,9 @@ private:
   BundleList m_Bundles;
 
   // Patches size
-  SizeList m_InputPatchesSizeForTraining;
-  SizeList m_InputPatchesSizeForValidation;
-  SizeList m_TargetPatchesSize;
+  SizeList   m_InputPatchesSizeForTraining;
+  SizeList   m_InputPatchesSizeForValidation;
+  SizeList   m_TargetPatchesSize;
 
   // Placeholders and Tensors names
   StringList m_InputPlaceholdersForTraining;
@@ -573,7 +562,7 @@ private:
 
 }; // end of class
 
-} // namespace Wrapper
+} // namespace wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT(otb::Wrapper::TensorflowModelTrain)
+OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelTrain )
diff --git a/app/otbTrainClassifierFromDeepFeatures.cxx b/app/otbTrainClassifierFromDeepFeatures.cxx
index cc3ec9ed..39ac4189 100644
--- a/app/otbTrainClassifierFromDeepFeatures.cxx
+++ b/app/otbTrainClassifierFromDeepFeatures.cxx
@@ -34,23 +34,23 @@ class TrainClassifierFromDeepFeatures : public CompositeApplication
 {
 public:
   /** Standard class typedefs. */
-  typedef TrainClassifierFromDeepFeatures Self;
-  typedef Application                     Superclass;
-  typedef itk::SmartPointer<Self>         Pointer;
-  typedef itk::SmartPointer<const Self>   ConstPointer;
+  typedef TrainClassifierFromDeepFeatures              Self;
+  typedef Application                         Superclass;
+  typedef itk::SmartPointer<Self>             Pointer;
+  typedef itk::SmartPointer<const Self>       ConstPointer;
 
   /** Standard macro */
   itkNewMacro(Self);
   itkTypeMacro(TrainClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
 
 private:
+
   //
   // Add an input source, which includes:
   // -an input image list
   // -an input patchsize (dimensions of samples)
   //
-  void
-  AddAnInputImage(int inputNumber = 0)
+  void AddAnInputImage(int inputNumber = 0)
   {
     inputNumber++;
 
@@ -61,83 +61,70 @@ private:
 
     // Populate group
     ShareParameter(ss_key_group.str(), "tfmodel." + ss_key_group.str(), ss_desc_group.str());
+
   }
 
-  void
-  DoInit()
+  void DoInit()
   {
 
-    SetName("TrainClassifierFromDeepFeatures");
-    SetDescription("Train a classifier from deep net based features of an image and training vector data.");
-
-    // Documentation
-    SetDocLongDescription("See TrainImagesClassifier application");
-    SetDocLimitations("None");
-    SetDocAuthors("Remi Cresson");
-    SetDocSeeAlso(" ");
-
-    AddDocTag(Tags::Learning);
-
-    ClearApplications();
-
-    // Add applications
-    AddApplication("TrainImagesClassifier", "train", "Train images classifier");
-    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model");
-
-    // Model shared parameters
-    AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources(); i++)
-    {
-      AddAnInputImage(i);
-    }
-    ShareParameter("model",
-                   "tfmodel.model",
-                   "Deep net inputs parameters",
-                   "Parameters of the deep net inputs: placeholder names, receptive fields, etc.");
-    ShareParameter("output",
-                   "tfmodel.output",
-                   "Deep net outputs parameters",
-                   "Parameters of the deep net outputs: tensors names, expression fields, etc.");
-    ShareParameter("optim",
-                   "tfmodel.optim",
-                   "Processing time optimization",
-                   "This group of parameters allows optimization of processing time");
-
-    // Train shared parameters
-    ShareParameter("ram", "train.ram", "Available RAM (Mb)", "Available RAM (Mb)");
-    ShareParameter("vd", "train.io.vd", "Vector data for training", "Input vector data for training");
-    ShareParameter("valid", "train.io.valid", "Vector data for validation", "Input vector data for validation");
-    ShareParameter("out", "train.io.out", "Output classification model", "Output classification model");
-    ShareParameter("confmatout",
-                   "train.io.confmatout",
-                   "Output confusion matrix",
-                   "Output confusion matrix of the classification model");
-
-    // Shared parameter groups
-    ShareParameter("sample", "train.sample", "Sampling parameters", "Training and validation samples parameters");
-    ShareParameter("elev", "train.elev", "Elevation parameters", "Elevation parameters");
-    ShareParameter("classifier", "train.classifier", "Classifier parameters", "Classifier parameters");
-    ShareParameter("rand", "train.rand", "User defined random seed", "User defined random seed");
+  SetName("TrainClassifierFromDeepFeatures");
+  SetDescription("Train a classifier from deep net based features of an image and training vector data.");
+
+  // Documentation
+  SetDocLongDescription("See TrainImagesClassifier application");
+  SetDocLimitations("None");
+  SetDocAuthors("Remi Cresson");
+  SetDocSeeAlso(" ");
+
+  AddDocTag(Tags::Learning);
+
+  ClearApplications();
+
+  // Add applications
+  AddApplication("TrainImagesClassifier",  "train",   "Train images classifier");
+  AddApplication("TensorflowModelServe",   "tfmodel", "Serve the TF model");
+
+  // Model shared parameters
+  AddAnInputImage();
+  for (int i = 1; i < tf::GetNumberOfSources() ; i++)
+  {
+    AddAnInputImage(i);
   }
+  ShareParameter("model",      "tfmodel.model",       "Deep net inputs parameters",   "Parameters of the deep net inputs: placeholder names, receptive fields, etc.");
+  ShareParameter("output",     "tfmodel.output",      "Deep net outputs parameters",  "Parameters of the deep net outputs: tensors names, expression fields, etc.");
+  ShareParameter("optim",      "tfmodel.optim",       "Processing time optimization", "This group of parameters allows optimization of processing time");
+
+  // Train shared parameters
+  ShareParameter("ram",        "train.ram",           "Available RAM (Mb)",           "Available RAM (Mb)");
+  ShareParameter("vd",         "train.io.vd",         "Vector data for training",     "Input vector data for training");
+  ShareParameter("valid",      "train.io.valid",      "Vector data for validation",   "Input vector data for validation");
+  ShareParameter("out",        "train.io.out",        "Output classification model",  "Output classification model");
+  ShareParameter("confmatout", "train.io.confmatout", "Output confusion matrix",      "Output confusion matrix of the classification model");
+
+  // Shared parameter groups
+  ShareParameter("sample",     "train.sample",        "Sampling parameters" ,         "Training and validation samples parameters" );
+  ShareParameter("elev",       "train.elev",          "Elevation parameters",         "Elevation parameters" );
+  ShareParameter("classifier", "train.classifier",    "Classifier parameters",        "Classifier parameters" );
+  ShareParameter("rand",       "train.rand",          "User defined random seed",     "User defined random seed" );
 
+  }
 
-  void
-  DoUpdateParameters()
+
+  void DoUpdateParameters()
   {
     UpdateInternalParameters("train");
   }
 
-  void
-  DoExecute()
+  void DoExecute()
   {
     ExecuteInternal("tfmodel");
-    GetInternalApplication("train")->AddImageToParameterInputImageList(
-      "io.il", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
+    GetInternalApplication("train")->AddImageToParameterInputImageList("io.il", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
     UpdateInternalParameters("train");
     ExecuteInternal("train");
   }
+
 };
 } // namespace Wrapper
 } // namespace otb
 
-OTB_APPLICATION_EXPORT(otb::Wrapper::TrainClassifierFromDeepFeatures)
+OTB_APPLICATION_EXPORT( otb::Wrapper::TrainClassifierFromDeepFeatures )
-- 
GitLab


From b0685fe1da4b8f94b035f37cfdc7e63138e461a6 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Tue, 5 Apr 2022 20:37:17 +0200
Subject: [PATCH 07/12] FIX: conflicts between develop and master

---
 include/CMakeLists.txt                        |  40 -
 include/otbDensePolygonClassStatistics.cxx    | 270 ------
 .../otbImageClassifierFromDeepFeatures.cxx    | 128 ---
 include/otbLabelImageSampleSelection.cxx      | 388 --------
 include/otbPatchesExtraction.cxx              | 261 ------
 include/otbPatchesSelection.cxx               | 825 ------------------
 include/otbTensorflowModelServe.cxx           | 350 --------
 include/otbTensorflowModelTrain.cxx           | 568 ------------
 .../otbTrainClassifierFromDeepFeatures.cxx    | 130 ---
 9 files changed, 2960 deletions(-)
 delete mode 100644 include/CMakeLists.txt
 delete mode 100644 include/otbDensePolygonClassStatistics.cxx
 delete mode 100644 include/otbImageClassifierFromDeepFeatures.cxx
 delete mode 100644 include/otbLabelImageSampleSelection.cxx
 delete mode 100644 include/otbPatchesExtraction.cxx
 delete mode 100644 include/otbPatchesSelection.cxx
 delete mode 100644 include/otbTensorflowModelServe.cxx
 delete mode 100644 include/otbTensorflowModelTrain.cxx
 delete mode 100644 include/otbTrainClassifierFromDeepFeatures.cxx

diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt
deleted file mode 100644
index 520249b1..00000000
--- a/include/CMakeLists.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-if(OTB_USE_TENSORFLOW)
-  # Tensorflow-dependent APPS
-  OTB_CREATE_APPLICATION(NAME TensorflowModelServe
-	SOURCES otbTensorflowModelServe.cxx ${${otb-module}_SYSTEM_INCLUDE_DIRS} "${tensorflow_include_dir}"
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${TENSORFLOW_LIBS}
-  )
-
-  OTB_CREATE_APPLICATION(NAME TensorflowModelTrain
-	SOURCES otbTensorflowModelTrain.cxx ${${otb-module}_SYSTEM_INCLUDE_DIRS} "${tensorflow_include_dir}"
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${TENSORFLOW_LIBS}
-  )
-
-  OTB_CREATE_APPLICATION(NAME TrainClassifierFromDeepFeatures
-	SOURCES otbTrainClassifierFromDeepFeatures.cxx ${${otb-module}_SYSTEM_INCLUDE_DIRS} "${tensorflow_include_dir}"
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${TENSORFLOW_LIBS}
-  )
-  
-  OTB_CREATE_APPLICATION(NAME ImageClassifierFromDeepFeatures
-	SOURCES otbImageClassifierFromDeepFeatures.cxx ${${otb-module}_SYSTEM_INCLUDE_DIRS} "${tensorflow_include_dir}"
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES} ${TENSORFLOW_LIBS}
-  )
-endif()
-
-# Tensorflow-independent APPS
-OTB_CREATE_APPLICATION(NAME PatchesSelection
-	SOURCES otbPatchesSelection.cxx
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES}
-)
-OTB_CREATE_APPLICATION(NAME PatchesExtraction
-	SOURCES otbPatchesExtraction.cxx
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES}
-)
-OTB_CREATE_APPLICATION(NAME LabelImageSampleSelection
-	SOURCES otbLabelImageSampleSelection.cxx
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES}
-)
-OTB_CREATE_APPLICATION(NAME DensePolygonClassStatistics
-	SOURCES otbDensePolygonClassStatistics.cxx
-	LINK_LIBRARIES ${${otb-module}_LIBRARIES}
-)
diff --git a/include/otbDensePolygonClassStatistics.cxx b/include/otbDensePolygonClassStatistics.cxx
deleted file mode 100644
index fa7c2701..00000000
--- a/include/otbDensePolygonClassStatistics.cxx
+++ /dev/null
@@ -1,270 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// Filters
-#include "otbStatisticsXMLFileWriter.h"
-#include "otbWrapperElevationParametersHandler.h"
-#include "otbVectorDataToLabelImageFilter.h"
-#include "otbImageToNoDataMaskFilter.h"
-#include "otbStreamingStatisticsMapFromLabelImageFilter.h"
-#include "otbVectorDataIntoImageProjectionFilter.h"
-#include "otbImageToVectorImageCastFilter.h"
-
-// OGR
-#include "otbOGR.h"
-
-namespace otb
-{
-namespace Wrapper
-{
-/** Utility function to negate std::isalnum */
-bool IsNotAlphaNum(char c)
-  {
-  return !std::isalnum(c);
-  }
-
-class DensePolygonClassStatistics : public Application
-{
-public:
-  /** Standard class typedefs. */
-  typedef DensePolygonClassStatistics   Self;
-  typedef Application                   Superclass;
-  typedef itk::SmartPointer<Self>       Pointer;
-  typedef itk::SmartPointer<const Self> ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(DensePolygonClassStatistics, Application);
-
-  /** DataObjects typedef */
-  typedef UInt32ImageType                           LabelImageType;
-  typedef UInt8ImageType                            MaskImageType;
-  typedef VectorData<>                              VectorDataType;
-
-  /** ProcessObjects typedef */
-  typedef otb::VectorDataIntoImageProjectionFilter<VectorDataType,
-      FloatVectorImageType>                                                       VectorDataReprojFilterType;
-
-  typedef otb::VectorDataToLabelImageFilter<VectorDataType, LabelImageType>       RasterizeFilterType;
-
-  typedef otb::VectorImage<MaskImageType::PixelType>                              InternalMaskImageType;
-  typedef otb::ImageToNoDataMaskFilter<FloatVectorImageType, MaskImageType>       NoDataMaskFilterType;
-  typedef otb::ImageToVectorImageCastFilter<MaskImageType, InternalMaskImageType> CastFilterType;
-
-  typedef otb::StreamingStatisticsMapFromLabelImageFilter<InternalMaskImageType,
-      LabelImageType>                                                             StatsFilterType;
-
-  typedef otb::StatisticsXMLFileWriter<FloatVectorImageType::PixelType>           StatWriterType;
-
-  void DoInit()
-  {
-    SetName("DensePolygonClassStatistics");
-    SetDescription("Computes statistics on a training polygon set.");
-
-    // Documentation
-    SetDocLongDescription("The application processes a dense set of polygons "
-      "intended for training (they should have a field giving the associated "
-      "class). The geometries are analyzed against a support image to compute "
-      "statistics : \n"
-      "  - number of samples per class\n"
-      "  - number of samples per geometry\n");
-    SetDocLimitations("None");
-    SetDocAuthors("Remi Cresson");
-
-    AddDocTag(Tags::Learning);
-
-    AddParameter(ParameterType_InputImage,  "in",   "Input image");
-    SetParameterDescription("in", "Support image that will be classified");
-    
-    AddParameter(ParameterType_InputVectorData, "vec", "Input vectors");
-    SetParameterDescription("vec","Input geometries to analyze");
-    
-    AddParameter(ParameterType_OutputFilename, "out", "Output XML statistics file");
-    SetParameterDescription("out","Output file to store statistics (XML format)");
-
-    AddParameter(ParameterType_ListView, "field", "Field Name");
-    SetParameterDescription("field","Name of the field carrying the class number in the input vectors.");
-    SetListViewSingleSelectionMode("field",true);
-
-    ElevationParametersHandler::AddElevationParameters(this, "elev");
-
-    AddRAMParameter();
-
-    // Doc example parameter settings
-    SetDocExampleParameterValue("in", "support_image.tif");
-    SetDocExampleParameterValue("vec", "variousVectors.shp");
-    SetDocExampleParameterValue("field", "label");
-    SetDocExampleParameterValue("out","polygonStat.xml");
-
-  }
-
-  void DoExecute()
-  {
-
-  // Retrieve the field name
-  std::vector<int> selectedCFieldIdx = GetSelectedItems("field");
-
-  if(selectedCFieldIdx.empty())
-    {
-    otbAppLogFATAL(<<"No field has been selected for data labelling!");
-    }
-
-  std::vector<std::string> cFieldNames = GetChoiceNames("field");  
-  std::string fieldName = cFieldNames[selectedCFieldIdx.front()];
-
-  otb::Wrapper::ElevationParametersHandler::SetupDEMHandlerFromElevationParameters(this,"elev");
-
-  // Get inputs
-  FloatVectorImageType::Pointer xs = GetParameterImage("in");
-  VectorDataType* shp = GetParameterVectorData("vec");
-
-  // Reproject vector data
-  m_VectorDataReprojectionFilter = VectorDataReprojFilterType::New();
-  m_VectorDataReprojectionFilter->SetInputVectorData(shp);
-  m_VectorDataReprojectionFilter->SetInputImage(xs);
-  m_VectorDataReprojectionFilter->Update();
-
-  // Internal no-data value
-  const LabelImageType::ValueType intNoData =
-      itk::NumericTraits<LabelImageType::ValueType>::max();
-
-  // Rasterize vector data (geometry ID)
-  m_RasterizeFIDFilter = RasterizeFilterType::New();
-  m_RasterizeFIDFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-  m_RasterizeFIDFilter->SetOutputOrigin(xs->GetOrigin());
-  m_RasterizeFIDFilter->SetOutputSpacing(xs->GetSignedSpacing());
-  m_RasterizeFIDFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-  m_RasterizeFIDFilter->SetBurnAttribute("________"); // Trick to get the polygon ID
-  m_RasterizeFIDFilter->SetGlobalWarningDisplay(false);
-  m_RasterizeFIDFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-  m_RasterizeFIDFilter->SetBackgroundValue(intNoData);
-  m_RasterizeFIDFilter->SetDefaultBurnValue(0);
-
-  // Rasterize vector data (geometry class)
-  m_RasterizeClassFilter = RasterizeFilterType::New();
-  m_RasterizeClassFilter->AddVectorData(m_VectorDataReprojectionFilter->GetOutput());
-  m_RasterizeClassFilter->SetOutputOrigin(xs->GetOrigin());
-  m_RasterizeClassFilter->SetOutputSpacing(xs->GetSignedSpacing());
-  m_RasterizeClassFilter->SetOutputSize(xs->GetLargestPossibleRegion().GetSize());
-  m_RasterizeClassFilter->SetBurnAttribute(fieldName);
-  m_RasterizeClassFilter->SetOutputProjectionRef(xs->GetProjectionRef());
-  m_RasterizeClassFilter->SetBackgroundValue(intNoData);
-  m_RasterizeClassFilter->SetDefaultBurnValue(0);
-
-  // No data mask
-  m_NoDataFilter = NoDataMaskFilterType::New();
-  m_NoDataFilter->SetInput(xs);
-  m_NoDataCastFilter = CastFilterType::New();
-  m_NoDataCastFilter->SetInput(m_NoDataFilter->GetOutput());
-
-  // Stats (geometry ID)
-  m_FIDStatsFilter = StatsFilterType::New();
-  m_FIDStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-  m_FIDStatsFilter->SetInputLabelImage(m_RasterizeFIDFilter->GetOutput());
-  m_FIDStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-  AddProcess(m_FIDStatsFilter->GetStreamer(), "Computing number of samples per vector");
-  m_FIDStatsFilter->Update();
-
-  // Stats (geometry class)
-  m_ClassStatsFilter = StatsFilterType::New();
-  m_ClassStatsFilter->SetInput(m_NoDataCastFilter->GetOutput());
-  m_ClassStatsFilter->SetInputLabelImage(m_RasterizeClassFilter->GetOutput());
-  m_ClassStatsFilter->GetStreamer()->SetAutomaticAdaptativeStreaming(GetParameterInt("ram"));
-  AddProcess(m_ClassStatsFilter->GetStreamer(), "Computing number of samples per class");
-  m_ClassStatsFilter->Update();
-
-  // Remove the no-data entries
-  StatsFilterType::LabelPopulationMapType fidMap = m_FIDStatsFilter->GetLabelPopulationMap();
-  StatsFilterType::LabelPopulationMapType classMap = m_ClassStatsFilter->GetLabelPopulationMap();
-  fidMap.erase(intNoData);
-  classMap.erase(intNoData);
-
-  m_StatWriter = StatWriterType::New();
-  m_StatWriter->SetFileName(this->GetParameterString("out"));
-  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerClass", classMap);
-  m_StatWriter->AddInputMap<StatsFilterType::LabelPopulationMapType>("samplesPerVector", fidMap);
-  m_StatWriter->Update();
-
-  }
-
-  void DoUpdateParameters()
-  {
-     if (HasValue("vec"))
-      {
-      std::string vectorFile = GetParameterString("vec");
-      ogr::DataSource::Pointer ogrDS =
-        ogr::DataSource::New(vectorFile, ogr::DataSource::Modes::Read);
-      ogr::Layer layer = ogrDS->GetLayer(0);
-      ogr::Feature feature = layer.ogr().GetNextFeature();
-
-      ClearChoices("field");
-
-      for(int iField=0; iField<feature.ogr().GetFieldCount(); iField++)
-        {
-        std::string key, item = feature.ogr().GetFieldDefnRef(iField)->GetNameRef();
-        key = item;
-        std::string::iterator end = std::remove_if(key.begin(),key.end(),IsNotAlphaNum);
-        std::transform(key.begin(), end, key.begin(), tolower);
-
-        OGRFieldType fieldType = feature.ogr().GetFieldDefnRef(iField)->GetType();
-
-        if(fieldType == OFTString || fieldType == OFTInteger || fieldType == OFTInteger64)
-          {
-          std::string tmpKey="field."+key.substr(0, end - key.begin());
-          AddChoice(tmpKey,item);
-          }
-        }
-      }
-
-     // Check that the extension of the output parameter is XML (mandatory for
-     // StatisticsXMLFileWriter)
-     // Check it here to trigger the error before polygons analysis
-
-     if (HasValue("out"))
-       {
-       // Store filename extension
-       // Check that the right extension is given : expected .xml
-       const std::string extension = itksys::SystemTools::GetFilenameLastExtension(this->GetParameterString("out"));
-
-       if (itksys::SystemTools::LowerCase(extension) != ".xml")
-         {
-         otbAppLogFATAL( << extension << " is a wrong extension for parameter \"out\": Expected .xml" );
-         }
-       }
-  }
-
-
-
-private:
-  // Filters
-  VectorDataReprojFilterType::Pointer m_VectorDataReprojectionFilter;
-  RasterizeFilterType::Pointer m_RasterizeFIDFilter;
-  RasterizeFilterType::Pointer m_RasterizeClassFilter;
-  NoDataMaskFilterType::Pointer m_NoDataFilter;
-  CastFilterType::Pointer m_NoDataCastFilter;
-  StatsFilterType::Pointer m_FIDStatsFilter;
-  StatsFilterType::Pointer m_ClassStatsFilter;
-  StatWriterType::Pointer m_StatWriter;
-
-};
-
-} // end of namespace Wrapper
-} // end of namespace otb
-
-OTB_APPLICATION_EXPORT(otb::Wrapper::DensePolygonClassStatistics)
diff --git a/include/otbImageClassifierFromDeepFeatures.cxx b/include/otbImageClassifierFromDeepFeatures.cxx
deleted file mode 100644
index f3ffd273..00000000
--- a/include/otbImageClassifierFromDeepFeatures.cxx
+++ /dev/null
@@ -1,128 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-
-// Elevation handler
-#include "otbWrapperElevationParametersHandler.h"
-#include "otbWrapperApplicationFactory.h"
-#include "otbWrapperCompositeApplication.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// TF (used to get the environment variable for the number of inputs)
-#include "otbTensorflowCommon.h"
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class ImageClassifierFromDeepFeatures : public CompositeApplication
-{
-public:
-  /** Standard class typedefs. */
-  typedef ImageClassifierFromDeepFeatures              Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(ImageClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
-
-private:
-
-  //
-  // Add an input source, which includes:
-  // -an input image list
-  // -an input patchsize (dimensions of samples)
-  //
-  void AddAnInputImage(int inputNumber = 0)
-  {
-    inputNumber++;
-
-    // Create keys and descriptions
-    std::stringstream ss_key_group, ss_desc_group;
-    ss_key_group << "source" << inputNumber;
-    ss_desc_group << "Parameters for source " << inputNumber;
-
-    // Populate group
-    ShareParameter(ss_key_group.str(), "tfmodel." + ss_key_group.str(), ss_desc_group.str());
-  }
-
-
-  void DoInit()
-  {
-
-    SetName("ImageClassifierFromDeepFeatures");
-    SetDescription("Classify image using features from a deep net and an OTB machine learning classification model");
-
-    // Documentation
-    SetDocLongDescription("See ImageClassifier application");
-    SetDocLimitations("None");
-    SetDocAuthors("Remi Cresson");
-    SetDocSeeAlso(" ");
-
-    AddDocTag(Tags::Learning);
-
-    ClearApplications();
-
-    // Add applications
-    AddApplication("ImageClassifier",      "classif", "Images classifier"  );
-    AddApplication("TensorflowModelServe", "tfmodel", "Serve the TF model" );
-
-    // Model shared parameters
-    AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
-    {
-      AddAnInputImage(i);
-    }
-    ShareParameter("deepmodel",  "tfmodel.model",
-        "Deep net model parameters",      "Deep net model parameters");
-    ShareParameter("output",     "tfmodel.output",
-        "Deep net outputs parameters",
-        "Deep net outputs parameters");
-    ShareParameter("optim", "tfmodel.optim",
-        "This group of parameters allows optimization of processing time",
-        "This group of parameters allows optimization of processing time");
-
-    // Classify shared parameters
-    ShareParameter("model"      , "classif.model"      , "Model file"          , "Model file"          );
-    ShareParameter("imstat"     , "classif.imstat"     , "Statistics file"     , "Statistics file"     );
-    ShareParameter("nodatalabel", "classif.nodatalabel", "Label mask value"    , "Label mask value"    );
-    ShareParameter("out"        , "classif.out"        , "Output image"        , "Output image"        );
-    ShareParameter("confmap"    , "classif.confmap"    , "Confidence map image", "Confidence map image");
-    ShareParameter("ram"        , "classif.ram"        , "Ram"                 , "Ram"                 );
-  }
-
-  void DoUpdateParameters()
-  {
-    UpdateInternalParameters("classif");
-  }
-
-  void DoExecute()
-  {
-    ExecuteInternal("tfmodel");
-    GetInternalApplication("classif")->SetParameterInputImage("in", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
-    UpdateInternalParameters("classif");
-    ExecuteInternal("classif");
-  }
-
-};
-} // namespace Wrapper
-} // namespace otb
-
-OTB_APPLICATION_EXPORT( otb::Wrapper::ImageClassifierFromDeepFeatures )
diff --git a/include/otbLabelImageSampleSelection.cxx b/include/otbLabelImageSampleSelection.cxx
deleted file mode 100644
index 50396fa0..00000000
--- a/include/otbLabelImageSampleSelection.cxx
+++ /dev/null
@@ -1,388 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-#include "vnl/vnl_vector.h"
-#include "itkImageRegionIterator.h"
-#include "itkImageRegionConstIterator.h"
-
-// image utils
-#include "otbTensorflowCommon.h"
-#include <algorithm>
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class LabelImageSampleSelection : public Application
-{
-public:
-  /** Standard class typedefs. */
-  typedef LabelImageSampleSelection           Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(LabelImageSampleSelection, Application);
-
-  /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType                 DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
-  typedef VectorDataType::DataNodeType                 DataNodeType;
-  typedef DataNodeType::Pointer                        DataNodePointer;
-
-  /** typedefs */
-  typedef Int16ImageType                               LabelImageType;
-  typedef unsigned int                                 IndexValueType;
-
-  void DoUpdateParameters()
-  {
-  }
-
-  /*
-   * Display the percentage
-   */
-  void ShowProgress(unsigned int count, unsigned int total, unsigned int step = 1000)
-  {
-    if (count % step == 0)
-    {
-      std::cout << std::setprecision(3) << "\r" << (100.0 * count / (float) total) << "%      " << std::flush;
-    }
-  }
-
-  void ShowProgressDone()
-  {
-    std::cout << "\rDone      " << std::flush;
-    std::cout << std::endl;
-  }
-
-  void DoInit()
-  {
-
-    // Documentation
-    SetName("LabelImageSampleSelection");
-    SetDescription("This application extracts points from an input label image. "
-        "This application is like \"SampleSelection\", but uses an input label "
-        "image, rather than an input vector data.");
-    SetDocLongDescription("This application produces a vector data containing "
-        "a set of points centered on the pixels of the input label image. "
-        "The user can control the number of points. The default strategy consists "
-        "in producing the same number of points in each class. If one class has a "
-        "smaller number of points than requested, this one is adjusted.");
-
-    SetDocAuthors("Remi Cresson");
-
-    // Input terrain truth
-    AddParameter(ParameterType_InputImage, "inref", "input terrain truth");
-
-    // Strategy
-    AddParameter(ParameterType_Choice, "strategy", "Sampling strategy");
-
-    AddChoice("strategy.constant","Set the same samples counts for all classes");
-    SetParameterDescription("strategy.constant","Set the same samples counts for all classes");
-
-    AddParameter(ParameterType_Int, "strategy.constant.nb", "Number of samples for all classes");
-    SetParameterDescription("strategy.constant.nb", "Number of samples for all classes");
-    SetMinimumParameterIntValue("strategy.constant.nb",1);
-    SetDefaultParameterInt("strategy.constant.nb",1000);
-
-    AddChoice("strategy.total","Set the total number of samples to generate, and use class proportions.");
-    SetParameterDescription("strategy.total","Set the total number of samples to generate, and use class proportions.");
-    AddParameter(ParameterType_Int,"strategy.total.v","The number of samples to generate");
-    SetParameterDescription("strategy.total.v","The number of samples to generate");
-    SetMinimumParameterIntValue("strategy.total.v",1);
-    SetDefaultParameterInt("strategy.total.v",1000);
-
-    AddChoice("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
-    SetParameterDescription("strategy.smallest","Set same number of samples for all classes, with the smallest class fully sampled");
-
-    AddChoice("strategy.all","Take all samples");
-    SetParameterDescription("strategy.all","Take all samples");
-
-    // Default strategy : smallest
-    SetParameterString("strategy","constant");
-
-    // Input no-data value
-    AddParameter(ParameterType_Int, "nodata", "nodata value");
-    MandatoryOn                    ("nodata");
-    SetDefaultParameterInt         ("nodata", -1);
-
-    // Padding
-    AddParameter(ParameterType_Int, "pad", "padding, in pixels");
-    SetDefaultParameterInt         ("pad", 0);
-    MandatoryOff                   ("pad");
-
-    // Output points
-    AddParameter(ParameterType_OutputVectorData, "outvec", "output set of points");
-
-    // Some example
-    SetDocExampleParameterValue("inref", "rasterized_terrain_truth.tif");
-    SetDocExampleParameterValue("outvec", "terrain_truth_points_sel.sqlite");
-
-    AddRAMParameter();
-
-  }
-
-
-  void DoExecute()
-  {
-
-    // Count the number of pixels in each class
-    const LabelImageType::InternalPixelType MAX_NB_OF_CLASSES =
-        itk::NumericTraits<LabelImageType::InternalPixelType>::max();;
-    LabelImageType::InternalPixelType class_begin = MAX_NB_OF_CLASSES;
-    LabelImageType::InternalPixelType class_end = 0;
-    vnl_vector<IndexValueType> tmp_number_of_samples(MAX_NB_OF_CLASSES, 0);
-
-    otbAppLogINFO("Computing number of pixels in each class");
-
-    // Explicit streaming over the input target image, based on the RAM parameter
-    typedef otb::RAMDrivenStrippedStreamingManager<FloatVectorImageType> StreamingManagerType;
-    StreamingManagerType::Pointer m_StreamingManager = StreamingManagerType::New();
-    m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
-
-    // We pad the image, if this is requested by the user
-    LabelImageType::Pointer inputImage = GetParameterInt16Image("inref");
-    LabelImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
-    entireRegion.ShrinkByRadius(GetParameterInt("pad"));
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
-
-    // Get nodata value
-    const LabelImageType::InternalPixelType nodata = GetParameterInt("nodata");
-
-    // First iteration to count the objects in each class
-    int m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
-    for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
-    {
-      LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
-      tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
-      for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
-      {
-        LabelImageType::InternalPixelType pixVal = inIt.Get();
-        if (pixVal != nodata)
-        {
-          // Update min and max value
-          if (pixVal > class_end)
-            class_end = pixVal;
-          if (pixVal < class_begin)
-            class_begin = pixVal;
-
-          tmp_number_of_samples(pixVal)++;
-        }
-      }
-
-      ShowProgress(m_CurrentDivision, m_NumberOfDivisions, 1);
-    }
-    ShowProgressDone();
-
-    // Number of classes
-    const LabelImageType::InternalPixelType number_of_classes = class_end - class_begin + 1;
-
-    // Number of samples in each class (counted)
-    vnl_vector<IndexValueType> number_of_samples = tmp_number_of_samples.extract(number_of_classes, class_begin);
-
-    // Number of samples in each class (target)
-    vnl_vector<IndexValueType> target_number_of_samples(number_of_classes, 0);
-
-    otbAppLogINFO( "Number of classes: " << number_of_classes <<
-        " starting from " << class_begin <<
-        " to " << class_end << " (no-data is " << nodata << ")");
-    otbAppLogINFO( "Number of pixels in each class: " << number_of_samples );
-
-    // Check the smallest number of samples amongst classes
-    IndexValueType min_elem_in_class = itk::NumericTraits<IndexValueType>::max();
-    for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-      min_elem_in_class = std::min(min_elem_in_class, number_of_samples[classIdx]);
-
-    // If one class is empty, throw an error
-    if (min_elem_in_class == 0)
-    {
-      otbAppLogFATAL("There is at least one class with no sample!")
-    }
-
-    // Sampling step for each classes
-    vnl_vector<IndexValueType> step_for_class(number_of_classes, 0);
-
-    // Compute the sampling step for each classes, depending on the chosen strategy
-    switch (this->GetParameterInt("strategy"))
-    {
-    // constant
-    case 0:
-    {
-      // Set the target number of samples in each class
-      target_number_of_samples.fill(GetParameterInt("strategy.constant.nb"));
-
-      // re adjust the number of samples to select in each class
-      if (min_elem_in_class < target_number_of_samples[0])
-      {
-        otbAppLogWARNING("Smallest class has " << min_elem_in_class <<
-            " samples but a number of " << target_number_of_samples[0] <<
-            " is given. Using " << min_elem_in_class);
-        target_number_of_samples.fill( min_elem_in_class );
-      }
-
-      // Compute the sampling step
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
-    }
-    break;
-
-    // total
-    case 1:
-    {
-      // Compute the sampling step
-      IndexValueType step = number_of_samples.sum() / this->GetParameterInt("strategy.total.v");
-      if (step == 0)
-      {
-        otbAppLogWARNING("The number of samples available is smaller than the required number of samples. " <<
-            "Setting sampling step to 1.");
-        step = 1;
-      }
-      step_for_class.fill(step);
-
-      // Compute the target number of samples
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        target_number_of_samples[classIdx] = number_of_samples[classIdx] / step;
-
-    }
-    break;
-
-    // smallest
-    case 2:
-    {
-      // Set the target number of samples to the smallest class
-      target_number_of_samples.fill( min_elem_in_class );
-
-      // Compute the sampling step
-      for (LabelImageType::InternalPixelType classIdx = 0 ; classIdx < number_of_classes ; classIdx++)
-        step_for_class[classIdx] = number_of_samples[classIdx] / target_number_of_samples[classIdx];
-
-    }
-    break;
-
-    // All
-    case 3:
-    {
-      // Easy
-      step_for_class.fill(1);
-      target_number_of_samples = number_of_samples;
-    }
-    break;
-    default:
-      otbAppLogFATAL("Strategy mode unknown :"<<this->GetParameterString("strategy"));
-      break;
-    }
-
-    // Print quick summary
-    otbAppLogINFO("Sampling summary:");
-    otbAppLogINFO("\tClass\tStep\tTot");
-    for (LabelImageType::InternalPixelType i = 0 ; i < number_of_classes ; i++)
-    {
-      vnl_vector<int> tmp (3,0);
-      tmp[0] = i + class_begin;
-      tmp[1] = step_for_class[i];
-      tmp[2] = target_number_of_samples[i];
-      otbAppLogINFO("\t" << tmp);
-    }
-
-    // Create a new vector data
-    // TODO: how to pre-allocate the datatree?
-    m_OutVectorData = VectorDataType::New();
-    DataTreeType::Pointer tree = m_OutVectorData->GetDataTree();
-    DataNodePointer root = tree->GetRoot()->Get();
-    DataNodePointer document = DataNodeType::New();
-    document->SetNodeType(DOCUMENT);
-    tree->Add(document, root);
-
-    // Duno if this makes sense?
-    m_OutVectorData->SetProjectionRef(inputImage->GetProjectionRef());
-    m_OutVectorData->SetOrigin(inputImage->GetOrigin());
-    m_OutVectorData->SetSpacing(inputImage->GetSpacing());
-
-    // Second iteration, to prepare the samples
-    vnl_vector<IndexValueType> sampledCount(number_of_classes, 0);
-    vnl_vector<IndexValueType> iteratorCount(number_of_classes, 0);
-    IndexValueType n_tot = 0;
-    const IndexValueType target_n_tot = target_number_of_samples.sum();
-    for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
-    {
-      LabelImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
-      tf::PropagateRequestedRegion<LabelImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<LabelImageType> inIt (inputImage, streamRegion);
-
-      for (inIt.GoToBegin() ; !inIt.IsAtEnd() ; ++inIt)
-      {
-        LabelImageType::InternalPixelType classVal = inIt.Get();
-
-        if (classVal != nodata)
-        {
-          classVal -= class_begin;
-
-          // Update the current position
-          iteratorCount[classVal]++;
-
-          // Every Xi samples (Xi is the step for class i)
-          if (iteratorCount[classVal] % ((int) step_for_class[classVal]) == 0 &&
-              sampledCount[classVal] < target_number_of_samples[classVal])
-          {
-            // Add this sample
-            sampledCount[classVal]++;
-            n_tot++;
-            ShowProgress(n_tot, target_n_tot);
-
-            // Create a point
-            LabelImageType::PointType geo;
-            inputImage->TransformIndexToPhysicalPoint(inIt.GetIndex(), geo);
-            DataNodeType::PointType point;
-            point[0] = geo[0];
-            point[1] = geo[1];
-
-            // Add point to the VectorData tree
-            DataNodePointer newDataNode = DataNodeType::New();
-            newDataNode->SetPoint(point);
-            newDataNode->SetFieldAsInt("class", static_cast<int>(classVal));
-            tree->Add(newDataNode, document);
-
-          } // sample this one
-        }
-      } // next pixel
-    } // next streaming region
-    ShowProgressDone();
-
-    otbAppLogINFO( "Number of samples in each class: " << sampledCount );
-
-    otbAppLogINFO( "Writing output vector data");
-
-    SetParameterOutputVectorData("outvec", m_OutVectorData);
-
-  }
-
-private:
-  VectorDataType::Pointer m_OutVectorData;
-
-}; // end of class
-
-} // end namespace wrapper
-} // end namespace otb
-
-OTB_APPLICATION_EXPORT(otb::Wrapper::LabelImageSampleSelection)
diff --git a/include/otbPatchesExtraction.cxx b/include/otbPatchesExtraction.cxx
deleted file mode 100644
index 33eb603c..00000000
--- a/include/otbPatchesExtraction.cxx
+++ /dev/null
@@ -1,261 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// Filter
-#include "otbTensorflowSampler.h"
-
-// Stack
-#include "otbTensorflowSource.h"
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class PatchesExtraction : public Application
-{
-public:
-  /** Standard class typedefs. */
-  typedef PatchesExtraction                   Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(PatchesExtraction, Application);
-
-  /** Filter typedef */
-  typedef otb::TensorflowSampler<FloatVectorImageType, VectorDataType> SamplerType;
-
-  /** Typedefs for image concatenation */
-  typedef TensorflowSource<FloatVectorImageType>                       TFSourceType;
-
-  //
-  // Store stuff related to one source
-  //
-  struct SourceBundle
-  {
-    TFSourceType                       m_ImageSource;   // Image source
-    FloatVectorImageType::SizeType     m_PatchSize;          // Patch size
-
-    std::string                        m_KeyIn;   // Key of input image list
-    std::string                        m_KeyOut;  // Key of output samples image
-    std::string                        m_KeyPszX; // Key for samples sizes X
-    std::string                        m_KeyPszY; // Key for samples sizes Y
-    std::string                        m_KeyNoData; // Key for no-data value
-
-    FloatVectorImageType::InternalPixelType m_NoDataValue; // No data value
-  };
-
-
-  //
-  // Add an input source, which includes:
-  // -an input image list
-  // -an output image (samples)
-  // -an input patchsize (dimensions of samples)
-  //
-  void AddAnInputImage()
-  {
-    // Number of source
-    unsigned int inputNumber = m_Bundles.size() + 1;
-
-    // Create keys and descriptions
-    std::stringstream ss_group_key, ss_desc_group, ss_key_in, ss_key_out, ss_desc_in,
-    ss_desc_out, ss_key_dims_x, ss_desc_dims_x, ss_key_dims_y, ss_desc_dims_y, ss_key_nodata, ss_desc_nodata;
-    ss_group_key   << "source"                    << inputNumber;
-    ss_desc_group  << "Parameters for source "    << inputNumber;
-    ss_key_out     << ss_group_key.str()          << ".out";
-    ss_desc_out    << "Output patches for image " << inputNumber;
-    ss_key_in      << ss_group_key.str()          << ".il";
-    ss_desc_in     << "Input image(s) "           << inputNumber;
-    ss_key_dims_x  << ss_group_key.str()          << ".patchsizex";
-    ss_desc_dims_x << "X patch size for image "   << inputNumber;
-    ss_key_dims_y  << ss_group_key.str()          << ".patchsizey";
-    ss_desc_dims_y << "Y patch size for image "   << inputNumber;
-    ss_key_nodata  << ss_group_key.str()          << ".nodata";
-    ss_desc_nodata << "No-data value for image "   << inputNumber;
-
-    // Populate group
-    AddParameter(ParameterType_Group,          ss_group_key.str(),  ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(),     ss_desc_in.str() );
-    AddParameter(ParameterType_OutputImage,    ss_key_out.str(),    ss_desc_out.str());
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_Float,          ss_key_nodata.str(), ss_desc_nodata.str());
-    MandatoryOff                              (ss_key_nodata.str());
-
-    // Add a new bundle
-    SourceBundle bundle;
-    bundle.m_KeyIn   = ss_key_in.str();
-    bundle.m_KeyOut  = ss_key_out.str();
-    bundle.m_KeyPszX = ss_key_dims_x.str();
-    bundle.m_KeyPszY = ss_key_dims_y.str();
-    bundle.m_KeyNoData = ss_key_nodata.str();
-
-    m_Bundles.push_back(bundle);
-
-  }
-
-  //
-  // Prepare bundles from the number of points
-  //
-  void PrepareInputs()
-  {
-    for (auto& bundle: m_Bundles)
-    {
-      // Create a stack of input images
-      FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
-      bundle.m_ImageSource.Set(list);
-
-      // Patch size
-      bundle.m_PatchSize[0] = GetParameterInt(bundle.m_KeyPszX);
-      bundle.m_PatchSize[1] = GetParameterInt(bundle.m_KeyPszY);
-
-      // No data value
-      if (HasValue(bundle.m_KeyNoData))
-	{
-        bundle.m_NoDataValue = GetParameterFloat(bundle.m_KeyNoData);
-        }
-
-    }
-  }
-
-  void DoInit()
-  {
-
-    // Documentation
-    SetName("PatchesExtraction");
-    SetDescription("This application extracts patches in multiple input images. Change "
-        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
-        "sources.");
-    SetDocLongDescription("The application takes an input vector layer which is a set of "
-        "points, typically the output of the \"SampleSelection\" or the \"LabelImageSampleSelection\" "
-        "application to sample patches in the input images (samples are centered on the points). "
-        "A \"source\" parameters group is composed of (i) an input image list (can be "
-        "one image e.g. high res. image, or multiple e.g. time series), (ii) the size "
-        "of the patches to sample, and (iii) the output images of patches which will "
-        "be generated at the end of the process. The example below show how to "
-        "set the samples sizes. For a SPOT6 image for instance, the patch size can "
-        "be 64x64 and for an input Sentinel-2 time series the patch size could be "
-        "1x1. Note that if a dimension size is not defined, the largest one will "
-        "be used (i.e. input image dimensions. The number of input sources can be changed "
-        "at runtime by setting the system environment variable " + tf::ENV_VAR_NAME_NSOURCES);
-
-    SetDocAuthors("Remi Cresson");
-
-    AddDocTag(Tags::Learning);
-
-    // Input/output images
-    AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
-      AddAnInputImage();
-
-    // Input vector data
-    AddParameter(ParameterType_InputVectorData, "vec", "Positions of the samples (must be in the same projection as input image)");
-
-    // Output label
-    AddParameter(ParameterType_OutputImage, "outlabels", "output labels");
-    SetDefaultOutputPixelType              ("outlabels", ImagePixelType_uint8);
-    MandatoryOff                           ("outlabels");
-
-    // Class field
-    AddParameter(ParameterType_String, "field", "field of class in the vector data");
-
-    // Examples values
-    SetDocExampleParameterValue("vec",                "points.sqlite");
-    SetDocExampleParameterValue("source1.il",         "$s2_list");
-    SetDocExampleParameterValue("source1.patchsizex", "16");
-    SetDocExampleParameterValue("source1.patchsizey", "16");
-    SetDocExampleParameterValue("field",              "class");
-    SetDocExampleParameterValue("source1.out",        "outpatches_16x16.tif");
-    SetDocExampleParameterValue("outlabels",          "outlabels.tif");
-
-  }
-
-  void DoExecute()
-  {
-
-    PrepareInputs();
-
-    // Setup the filter
-    SamplerType::Pointer sampler = SamplerType::New();
-    sampler->SetInputVectorData(GetParameterVectorData("vec"));
-    sampler->SetField(GetParameterAsString("field"));
-
-    for (auto& bundle: m_Bundles)
-    {
-      if (HasValue(bundle.m_KeyNoData)) 
-        {
-        otbAppLogINFO("Rejecting samples that have at least one no-data value");
-        sampler->PushBackInputWithPatchSize(bundle.m_ImageSource.Get(), bundle.m_PatchSize, bundle.m_NoDataValue);
-        }
-      else
-        {
-        sampler->PushBackInputWithPatchSize(bundle.m_ImageSource.Get(), bundle.m_PatchSize);
-        }
-    }
-
-    // Run the filter
-    AddProcess(sampler, "Sampling patches");
-    sampler->Update();
-
-    // Show numbers
-    otbAppLogINFO("Number of samples collected: " << sampler->GetNumberOfAcceptedSamples());
-    otbAppLogINFO("Number of samples rejected : " << sampler->GetNumberOfRejectedSamples());
-
-    // Save patches image
-    if (sampler->GetNumberOfAcceptedSamples()>0)
-    {
-      for (unsigned int i = 0 ; i < m_Bundles.size() ; i++)
-      {
-        SetParameterOutputImage(m_Bundles[i].m_KeyOut, sampler->GetOutputPatchImages()[i]);
-      }
-    }
-    else
-    {
-      otbAppLogFATAL("No patch to sample. Please check that your vector data falls inside your images, and no-data values.");
-    }
-
-
-    // Save label image (if needed)
-    if (HasValue("outlabels"))
-    {
-      SetParameterOutputImage("outlabels", sampler->GetOutputLabelImage());
-    }
-
-  }
-  
-  
-  void DoUpdateParameters()
-  {
-  }
-
-private:
-  std::vector<SourceBundle> m_Bundles;
-
-}; // end of class
-
-} // end namespace wrapper
-} // end namespace otb
-
-OTB_APPLICATION_EXPORT(otb::Wrapper::PatchesExtraction)
diff --git a/include/otbPatchesSelection.cxx b/include/otbPatchesSelection.cxx
deleted file mode 100644
index 68d76221..00000000
--- a/include/otbPatchesSelection.cxx
+++ /dev/null
@@ -1,825 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2022 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// Image
-#include "itkImageRegionConstIterator.h"
-#include "itkUnaryFunctorImageFilter.h"
-#include "itkFlatStructuringElement.h"
-#include "itkBinaryErodeImageFilter.h"
-#include "otbStreamingResampleImageFilter.h"
-#include "itkNearestNeighborInterpolateImageFunction.h"
-#include "itkMaskImageFilter.h"
-
-// Image utils
-#include "otbTensorflowCommon.h"
-#include "otbTensorflowSamplingUtils.h"
-#include "itkImageRegionConstIteratorWithOnlyIndex.h"
-
-// Math
-#include <random>
-#include <limits>
-
-// Functor to retrieve nodata
-template<class TPixel, class OutputPixel>
-class IsNoData
-{
-public:
-  IsNoData(){}
-  ~IsNoData(){}
-
-  inline OutputPixel operator()( const TPixel & A ) const
-  {
-    for (unsigned int band = 0 ; band < A.Size() ; band++)
-    {
-      if (A[band] != m_NoDataValue)
-        return 1;
-    }
-    return 0;
-  }
-
-  void SetNoDataValue(typename TPixel::ValueType value)
-  {
-    m_NoDataValue = value;
-  }
-
-private:
-  typename TPixel::ValueType m_NoDataValue;
-};
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class PatchesSelection : public Application
-{
-public:
-  /** Standard class typedefs. */
-  typedef PatchesSelection                    Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(PatchesSelection, Application);
-
-  /** Vector data typedefs */
-  typedef VectorDataType::DataTreeType                 DataTreeType;
-  typedef itk::PreOrderTreeIterator<DataTreeType>      TreeIteratorType;
-  typedef VectorDataType::DataNodeType                 DataNodeType;
-  typedef DataNodeType::Pointer                        DataNodePointer;
-  typedef DataNodeType::PointType                      DataNodePointType;
-
-  /** typedefs */
-  typedef IsNoData<FloatVectorImageType::PixelType, UInt8ImageType::PixelType > IsNoDataFunctorType;
-  typedef itk::UnaryFunctorImageFilter<FloatVectorImageType, UInt8ImageType, IsNoDataFunctorType> IsNoDataFilterType;
-
-  typedef itk::FlatStructuringElement<2>                                         StructuringType;
-  typedef StructuringType::RadiusType                                            RadiusType;
-
-  typedef itk::BinaryErodeImageFilter<UInt8ImageType, UInt8ImageType, StructuringType> MorphoFilterType;
-
-  typedef otb::StreamingResampleImageFilter<UInt8ImageType,UInt8ImageType> PadFilterType;
-  typedef itk::NearestNeighborInterpolateImageFunction<UInt8ImageType> NNInterpolatorType;
-
-  typedef tf::Distribution<UInt8ImageType> DistributionType;
-
-  typedef itk::MaskImageFilter<UInt8ImageType, UInt8ImageType, UInt8ImageType> MaskImageFilterType;
-
-  void DoInit()
-  {
-
-    // Documentation
-    SetName("PatchesSelection");
-    SetDescription("This application generate points sampled at regular interval over "
-        "the input image region. The grid size and spacing can be configured.");
-    SetDocLongDescription("This application produces a vector data containing "
-        "a set of points centered on the patches lying in the valid regions of the input image. ");
-
-    SetDocAuthors("Remi Cresson");
-
-    // Input image
-    AddParameter(ParameterType_InputImage, "in", "input image");
-    AddParameter(ParameterType_InputImage, "mask", "input mask");
-    MandatoryOff("mask");
-
-    // Input no-data value
-    AddParameter(ParameterType_Float, "nodata", "nodata value");
-    MandatoryOff                     ("nodata");
-
-    // Grid
-    AddParameter(ParameterType_Group, "grid", "grid settings");
-    AddParameter(ParameterType_Int, "grid.step", "step between patches");
-    SetMinimumParameterIntValue    ("grid.step", 1);
-    AddParameter(ParameterType_Int, "grid.psize", "patches size");
-    SetMinimumParameterIntValue    ("grid.psize", 1);
-    AddParameter(ParameterType_Int, "grid.offsetx", "offset of the grid (x axis)");
-    SetDefaultParameterInt         ("grid.offsetx", 0);
-    AddParameter(ParameterType_Int, "grid.offsety", "offset of the grid (y axis)");
-    SetDefaultParameterInt         ("grid.offsety", 0);
-
-    // Strategy
-    AddParameter(ParameterType_Choice, "strategy", "Selection strategy for validation/training patches");
-    // Chess board
-    AddChoice("strategy.chessboard", "Fifty fifty with chess-board-like layout. Only \"outtrain\" and "
-        "\"outvalid\" output parameters are used.");
-    // Split
-    AddChoice("strategy.split", "The traditional training/validation/test split. The \"outtrain\", "
-        "\"outvalid\" and \"outtest\" output parameters are used.");
-    AddParameter(ParameterType_Bool, "strategy.split.random", "If false, samples will always be from "
-        "the same group");
-    MandatoryOff                     ("strategy.split.random");
-    AddParameter(ParameterType_Float, "strategy.split.trainprop", "Proportion of training population.");
-    SetMinimumParameterFloatValue    ("strategy.split.trainprop", 0.0);
-    SetDefaultParameterFloat         ("strategy.split.trainprop", 50.0);
-    AddParameter(ParameterType_Float, "strategy.split.validprop", "Proportion of validation population.");
-    SetMinimumParameterFloatValue    ("strategy.split.validprop", 0.0);
-    SetDefaultParameterFloat         ("strategy.split.validprop", 25.0);
-    AddParameter(ParameterType_Float, "strategy.split.testprop", "Proportion of test population.");
-    SetMinimumParameterFloatValue    ("strategy.split.testprop", 0.0);
-    SetDefaultParameterFloat         ("strategy.split.testprop", 25.0);
-    // All
-    AddChoice("strategy.all", "All locations. Only the \"outtrain\" output parameter is used.");
-    // Balanced (experimental)
-    AddChoice("strategy.balanced", "you can chose the degree of spatial randomness vs class balance");
-    AddParameter(ParameterType_Float, "strategy.balanced.sp", "Spatial proportion: between 0 and 1, "
-        "indicating the amount of randomly sampled data in space");
-    SetMinimumParameterFloatValue    ("strategy.balanced.sp", 0);
-    SetMaximumParameterFloatValue    ("strategy.balanced.sp", 1);
-    SetDefaultParameterFloat         ("strategy.balanced.sp", 0.25);
-    AddParameter(ParameterType_Int,   "strategy.balanced.nclasses", "Number of classes");
-    SetMinimumParameterIntValue      ("strategy.balanced.nclasses", 2);
-    MandatoryOn                      ("strategy.balanced.nclasses");
-    AddParameter(ParameterType_InputImage, "strategy.balanced.labelimage", "input label image");
-    MandatoryOn                           ("strategy.balanced.labelimage");
-
-    // Output points
-    AddParameter(ParameterType_OutputVectorData, "outtrain", "output set of points (training)");
-    AddParameter(ParameterType_OutputVectorData, "outvalid", "output set of points (validation)");
-    MandatoryOff("outvalid");
-    AddParameter(ParameterType_OutputVectorData, "outtest", "output set of points (test)");
-    MandatoryOff("outtest");
-
-    AddRAMParameter();
-
-  }
-
-  class SampleBundle
-  {
-  public:
-    SampleBundle(){}
-    explicit SampleBundle(unsigned int nClasses): dist(DistributionType(nClasses)), id(0), group(true){
-      (void) point;
-      (void) index;
-    }
-    ~SampleBundle(){}
-
-    SampleBundle(const SampleBundle & other): dist(other.GetDistribution()), id(other.GetSampleID()),
-      point(other.GetPosition()), group(other.GetGroup()), index(other.GetIndex())
-    {}
-
-    DistributionType GetDistribution() const
-    {
-      return dist;
-    }
-
-    DistributionType& GetModifiableDistribution()
-    {
-      return dist;
-    }
-
-    unsigned int GetSampleID() const
-    {
-      return id;
-    }
-
-    unsigned int& GetModifiableSampleID()
-    {
-      return id;
-    }
-
-    DataNodePointType GetPosition() const
-    {
-      return point;
-    }
-
-    DataNodePointType& GetModifiablePosition()
-    {
-      return point;
-    }
-
-    int& GetModifiableGroup()
-    {
-      return group;
-    }
-
-    int GetGroup() const
-    {
-      return group;
-    }
-
-    UInt8ImageType::IndexType& GetModifiableIndex()
-    {
-      return index;
-    }
-
-    UInt8ImageType::IndexType GetIndex() const
-    {
-      return index;
-    }
-
-  private:
-
-    DistributionType dist;
-    unsigned int id;
-    DataNodePointType point;
-    int group;
-    UInt8ImageType::IndexType index;
-  };
-
-  /*
-   * Apply the given function at each sampling location, checking if the patch is valid or not
-   */
-  template<typename TLambda>
-  void Apply(TLambda lambda)
-  {
-
-    int userOffX = GetParameterInt("grid.offsetx");
-    int userOffY = GetParameterInt("grid.offsety");
-
-    // Tell if the patch size is odd or even
-    const bool isEven = GetParameterInt("grid.psize") % 2 == 0;
-    otbAppLogINFO("Patch size is even: " << isEven);
-
-    // Explicit streaming over the morphed mask, based on the RAM parameter
-    typedef otb::RAMDrivenStrippedStreamingManager<UInt8ImageType> StreamingManagerType;
-    StreamingManagerType::Pointer m_StreamingManager = StreamingManagerType::New();
-    m_StreamingManager->SetAvailableRAMInMB(GetParameterInt("ram"));
-
-    UInt8ImageType::Pointer inputImage;
-    bool readInput = true;
-    if (!HasValue("nodata"))
-      {
-      otbAppLogINFO("No value specified for no-data. Input image pixels no-data values will not be checked.");
-      if (HasValue("mask"))
-        {
-        otbAppLogINFO("Using the provided \"mask\" parameter.");
-        inputImage = GetParameterUInt8Image("mask");
-        }
-      else
-        {
-        // This is just a hack to not trigger the whole morpho/pad pipeline
-        inputImage = m_NoDataFilter->GetOutput();
-        readInput = false;
-        }
-      }
-    else
-      {
-      inputImage = m_MorphoFilter->GetOutput();
-
-      // Offset update because the morpho filter pads the input image with 1 pixel border
-      userOffX += 1;
-      userOffY += 1;
-      }
-    UInt8ImageType::RegionType entireRegion = inputImage->GetLargestPossibleRegion();
-    entireRegion.ShrinkByRadius(m_Radius);
-    m_StreamingManager->PrepareStreaming(inputImage, entireRegion );
-    UInt8ImageType::IndexType start;
-    start[0] = m_Radius[0] + 1;
-    start[1] = m_Radius[1] + 1;
-
-    int m_NumberOfDivisions = m_StreamingManager->GetNumberOfSplits();
-    UInt8ImageType::IndexType pos;
-    UInt8ImageType::IndexValueType step = GetParameterInt("grid.step");
-    pos.Fill(0);
-
-    // Offset update
-    userOffX %= step ;
-    userOffY %= step ;
-
-    for (int m_CurrentDivision = 0; m_CurrentDivision < m_NumberOfDivisions; m_CurrentDivision++)
-    {
-      otbAppLogINFO("Processing split " << (m_CurrentDivision + 1) << "/" << m_NumberOfDivisions);
-
-      UInt8ImageType::RegionType streamRegion = m_StreamingManager->GetSplit(m_CurrentDivision);
-      tf::PropagateRequestedRegion<UInt8ImageType>(inputImage, streamRegion);
-      itk::ImageRegionConstIterator<UInt8ImageType> inIt (inputImage, streamRegion);
-
-      for (inIt.GoToBegin(); !inIt.IsAtEnd(); ++inIt)
-      {
-        UInt8ImageType::IndexType idx = inIt.GetIndex();
-        idx[0] -= start[0];
-        idx[1] -= start[1];
-
-        if (idx[0] % step == userOffX && idx[1] % step == userOffY)
-        {
-          UInt8ImageType::InternalPixelType pixVal = 1;
-          if (readInput)
-            pixVal = inIt.Get();
-
-          if (pixVal == 1)
-          {
-            // Update grid position
-            pos[0] = idx[0] / step;
-            pos[1] = idx[1] / step;
-
-            // Compute coordinates
-            UInt8ImageType::PointType geo;
-            inputImage->TransformIndexToPhysicalPoint(inIt.GetIndex(), geo);
-
-            // Update geo if we want the corner or the center
-            if (isEven)
-            {
-              geo[0] -= 0.5 * std::abs(inputImage->GetSpacing()[0]);
-              geo[1] -= 0.5 * std::abs(inputImage->GetSpacing()[1]);
-            }
-
-            // Lambda call
-            lambda(pos, geo);
-          }
-        }
-      }
-
-    }
-  }
-
-  /*
-   * Allocate a std::vector of sample bundle
-   */
-  std::vector<SampleBundle>
-  AllocateSamples(unsigned int nbOfClasses = 2)
-  {
-    // Nb of samples (maximum)
-    const UInt8ImageType::RegionType entireRegion = m_NoDataFilter->GetOutput()->GetLargestPossibleRegion();
-    const unsigned int maxNbOfCols = std::ceil(entireRegion.GetSize(0)/GetParameterInt("grid.step")) + 1;
-    const unsigned int maxNbOfRows = std::ceil(entireRegion.GetSize(1)/GetParameterInt("grid.step")) + 1;
-    unsigned int maxNbOfSamples = 1;
-    maxNbOfSamples *= maxNbOfCols;
-    maxNbOfSamples *= maxNbOfRows;
-
-    // Nb of classes
-    SampleBundle initSB(nbOfClasses);
-    std::vector<SampleBundle> bundles(maxNbOfSamples, initSB);
-
-    return bundles;
-  }
-
-  void SetBlackOrWhiteBundle(SampleBundle & bundle, unsigned int & count,
-      const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo)
-  {
-    // Black or white
-    int black = (pos[0] + pos[1]) % 2;
-
-    bundle.GetModifiableSampleID() = count;
-    bundle.GetModifiablePosition() = geo;
-    bundle.GetModifiableGroup() = black;
-    bundle.GetModifiableIndex() = pos;
-    count++;
-
-  }
-
-  /*
-   * Samples are placed at regular intervals with the same layout as a chessboard,
-   * in two groups (A: black, B: white)
-   */
-  void SampleChessboard()
-  {
-
-    std::vector<SampleBundle> bundles = AllocateSamples();
-
-    unsigned int count = 0;
-    auto lambda = [this, &count, &bundles]
-                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
-      SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
-    };
-
-    Apply(lambda);
-    bundles.resize(count);
-
-    // Export training/validation samples
-    PopulateVectorData(bundles);
-  }
-
-  void SetSplitBundle(SampleBundle & bundle, unsigned int & count,
-      const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo,
-      const std::vector<int> & groups)
-  {
-
-    bundle.GetModifiableGroup() = groups[count];
-    bundle.GetModifiableSampleID() = count;
-    bundle.GetModifiablePosition() = geo;
-    bundle.GetModifiableIndex() = pos;
-    count++;
-  }
-
-  /*
-   * Samples are split in training/validation/test groups
-   */
-  void SampleSplit(float trp, float vp, float tp)
-  {
-
-    std::vector<SampleBundle> bundles = AllocateSamples();
-
-    // Populate groups
-    unsigned int nbSamples = bundles.size();
-    float tot = (trp + vp + tp);
-    std::vector<float> props = {trp, vp, tp};
-    std::vector<float> incs, counts;
-    for (auto& prop: props)
-    {
-      if (prop > 0)
-      {
-        incs.push_back(tot / prop);
-        counts.push_back(.0);
-      }
-      else
-        {
-        incs.push_back(.0);
-        counts.push_back((float) nbSamples);
-        }
-    }
-    std::vector<int> groups;
-    for (unsigned int i = 0; i < nbSamples; i++)
-    {
-      // Find the group with the less samples
-      auto it = std::min_element(std::begin(counts), std::end(counts));
-      auto idx = std::distance(std::begin(counts), it);
-      assert (idx > 0);
-      // Assign the group number, and update counts
-      groups.push_back(idx);
-      counts[idx] += incs[idx];
-    }
-    if (GetParameterInt("strategy.split.random") > 0)
-    {
-      // Shuffle groups
-      auto rng = std::default_random_engine {};
-      std::shuffle(std::begin(groups), std::end(groups), rng);
-    }
-
-    unsigned int count = 0;
-    auto lambda = [this, &count, &bundles, &groups]
-                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
-      SetSplitBundle(bundles[count], count, pos, geo, groups);
-    };
-
-    Apply(lambda);
-    bundles.resize(count);
-
-    // Export training/validation samples
-    PopulateVectorData(bundles);
-  }
-
-  void SampleBalanced()
-  {
-
-    // 1. Compute distribution of all samples
-
-    otbAppLogINFO("Computing samples distribution...");
-
-    std::vector<SampleBundle> bundles = AllocateSamples(GetParameterInt("strategy.balanced.nclasses"));
-
-    // Patch size
-    UInt8ImageType::SizeType patchSize;
-    patchSize.Fill(GetParameterInt("grid.psize"));
-    unsigned int count = 0;
-    auto lambda = [this, &bundles, &patchSize, &count]
-                   (const UInt8ImageType::IndexType & pos, const UInt8ImageType::PointType & geo) {
-
-      // Update this sample distribution
-      if (tf::UpdateDistributionFromPatch<UInt8ImageType>(GetParameterUInt8Image("strategy.balanced.labelimage"),
-          geo, patchSize, bundles[count].GetModifiableDistribution()))
-      {
-        SetBlackOrWhiteBundle(bundles[count], count, pos, geo);
-      }
-    };
-
-    Apply(lambda);
-    bundles.resize(count);
-
-    otbAppLogINFO("Total number of candidates: " << count );
-
-    // 2. Seed = spatially random samples
-
-    otbAppLogINFO("Spatial sampling proportion " << GetParameterFloat("strategy.balanced.sp"));
-
-    const int samplingStep = static_cast<int>(1.0 / std::sqrt(GetParameterFloat("strategy.balanced.sp")));
-
-    otbAppLogINFO("Spatial sampling step " << samplingStep);
-
-    float step = 0;
-    std::vector<SampleBundle> seed(count);
-    std::vector<SampleBundle> candidates(count);
-
-    unsigned int seedCount = 0;
-    unsigned int candidatesCount = 0;
-    for (auto& d: bundles)
-    {
-      if (d.GetIndex()[0] % samplingStep + d.GetIndex()[1] % samplingStep == 0)
-      {
-        seed[seedCount] = d;
-        seedCount++;
-      }
-      else
-      {
-        candidates[candidatesCount] = d;
-        candidatesCount++;
-      }
-      step++;
-    }
-
-    seed.resize(seedCount);
-    candidates.resize(candidatesCount);
-
-    otbAppLogINFO("Spatial seed has " << seedCount << " samples");
-
-    unsigned int nbToRemove = static_cast<unsigned int>(seedCount - GetParameterFloat("strategy.balanced.sp") * count);
-
-    otbAppLogINFO("Adjust spatial seed removing " << nbToRemove << " samples");
-
-    float removalRate = static_cast<float>(seedCount) / static_cast<float>(nbToRemove);
-    float removalStep = 0;
-    auto removeSamples = [&removalStep, &removalRate](SampleBundle & b) -> bool {
-      (void) b;
-      bool ret = false;
-      if (removalStep >= removalRate)
-        {
-        removalStep = fmod(removalStep, removalRate);
-        ret = true;
-        }
-      else
-        ret = false;
-      removalStep++;
-      return ret;;
-    };
-    auto iterator = std::remove_if(seed.begin(), seed.end(), removeSamples);
-    seed.erase(iterator, seed.end());
-
-    otbAppLogINFO("Spatial seed size : " << seed.size());
-
-    // 3. Compute seed distribution
-
-    const unsigned int nbOfClasses = GetParameterInt("strategy.balanced.nclasses");
-    DistributionType seedDist(nbOfClasses);
-    for (auto& d: seed)
-      seedDist.Update(d.GetDistribution());
-
-    otbAppLogINFO("Spatial seed distribution: " << seedDist.ToString());
-
-    // 4. Select other samples to feed the seed
-
-    otbAppLogINFO("Balance seed candidates size: " << candidates.size());
-
-    // Sort by cos
-    auto comparator = [&seedDist](const SampleBundle & a, const SampleBundle & b) -> bool{
-      return a.GetDistribution().Cosinus(seedDist) > b.GetDistribution().Cosinus(seedDist);
-    };
-    sort(candidates.begin(), candidates.end(), comparator);
-
-    DistributionType idealDist(nbOfClasses, 1.0 / std::sqrt(static_cast<float>(nbOfClasses)));
-    float minCos = 0;
-    unsigned int samplesAdded = 0;
-    seed.resize(seed.size()+candidates.size(), SampleBundle(nbOfClasses));
-    while(candidates.size() > 0)
-    {
-      // Get the less correlated sample
-      SampleBundle candidate = candidates.back();
-
-      // Update distribution
-      seedDist.Update(candidate.GetDistribution());
-
-      // Compute cos of the updated distribution
-      float idealCos = seedDist.Cosinus(idealDist);
-      if (idealCos > minCos)
-      {
-        minCos = idealCos;
-        seed[seedCount] = candidate;
-        seedCount++;
-        candidates.pop_back();
-        samplesAdded++;
-      }
-      else
-      {
-        break;
-      }
-    }
-    seed.resize(seedCount);
-
-    otbAppLogINFO("Final samples number: " << seed.size() << " (" << samplesAdded << " samples added)");
-    otbAppLogINFO("Final samples distribution: " << seedDist.ToString());
-
-    // 5. Export training/validation samples
-    PopulateVectorData(seed);
-  }
-
-  void PopulateVectorData(const std::vector<SampleBundle> & samples)
-  {
-    // Get data tree
-    DataTreeType::Pointer treeTrain = m_OutVectorDataTrain->GetDataTree();
-    DataTreeType::Pointer treeValid = m_OutVectorDataValid->GetDataTree();
-    DataTreeType::Pointer treeTest = m_OutVectorDataTest->GetDataTree();
-    DataNodePointer rootTrain = treeTrain->GetRoot()->Get();
-    DataNodePointer rootValid = treeValid->GetRoot()->Get();
-    DataNodePointer rootTest = treeTest->GetRoot()->Get();
-    DataNodePointer documentTrain = DataNodeType::New();
-    DataNodePointer documentValid = DataNodeType::New();
-    DataNodePointer documentTest = DataNodeType::New();
-    documentTrain->SetNodeType(DOCUMENT);
-    documentValid->SetNodeType(DOCUMENT);
-    documentTest->SetNodeType(DOCUMENT);
-    treeTrain->Add(documentTrain, rootTrain);
-    treeValid->Add(documentValid, rootValid);
-    treeTest->Add(documentTest, rootTest);
-
-    unsigned int id = 0;
-    for (const auto& sample: samples)
-    {
-      // Add point to the VectorData tree
-      DataNodePointer newDataNode = DataNodeType::New();
-      newDataNode->SetPoint(sample.GetPosition());
-      newDataNode->SetFieldAsInt("id", id);
-      id++;
-
-      // select this sample
-      if (sample.GetGroup() == 0)
-      {
-        // Train
-        treeTrain->Add(newDataNode, documentTrain);
-      }
-      else if (sample.GetGroup() == 1)
-      {
-        // Valid
-        treeValid->Add(newDataNode, documentValid);
-      }
-      else if (sample.GetGroup() == 2)
-      {
-        // Test
-        treeTest->Add(newDataNode, documentTest);
-      }
-
-    }
-  }
-
-  void DoExecute()
-  {
-    otbAppLogINFO("Grid step : " << this->GetParameterInt("grid.step"));
-    otbAppLogINFO("Patch size : " << this->GetParameterInt("grid.psize"));
-
-    // Compute no-data mask
-    m_NoDataFilter = IsNoDataFilterType::New();
-    float nodataValue = std::numeric_limits<float>::quiet_NaN();
-    if (HasValue("nodata"))
-    {
-      nodataValue = GetParameterFloat("nodata");
-    }
-    m_NoDataFilter->GetFunctor().SetNoDataValue(nodataValue);
-    m_NoDataFilter->SetInput(GetParameterFloatVectorImage("in"));
-    m_NoDataFilter->UpdateOutputInformation();
-    UInt8ImageType::Pointer src = m_NoDataFilter->GetOutput();
-
-    // If mask available, use it
-    if (HasValue("mask"))
-      {
-      if (GetParameterUInt8Image("mask")->GetLargestPossibleRegion().GetSize() !=
-          GetParameterFloatVectorImage("in")->GetLargestPossibleRegion().GetSize())
-        otbAppLogFATAL("Mask must have the same size as the input image!");
-      m_MaskImageFilter = MaskImageFilterType::New();
-      m_MaskImageFilter->SetInput(m_NoDataFilter->GetOutput());
-      m_MaskImageFilter->SetMaskImage(GetParameterUInt8Image("mask"));
-      m_MaskImageFilter->UpdateOutputInformation();
-      src = m_MaskImageFilter->GetOutput();
-      }
-
-    // Padding 1 pixel
-    UInt8ImageType::SizeType size = src->GetLargestPossibleRegion().GetSize();
-    size[0] += 2;
-    size[1] += 2;
-    UInt8ImageType::SpacingType spacing = src->GetSignedSpacing();
-    UInt8ImageType::PointType origin = src->GetOrigin();
-    origin[0] -= spacing[0];
-    origin[1] -= spacing[1];
-    m_PadFilter = PadFilterType::New();
-    NNInterpolatorType::Pointer nnInterpolator = NNInterpolatorType::New();
-    m_PadFilter->SetInterpolator(nnInterpolator);
-    m_PadFilter->SetInput( src );
-    m_PadFilter->SetOutputOrigin(origin);
-    m_PadFilter->SetOutputSpacing(spacing);
-    m_PadFilter->SetOutputSize(size);
-    m_PadFilter->SetEdgePaddingValue( 0 );
-    m_PadFilter->UpdateOutputInformation();
-
-    // Morpho
-    m_Radius[0] = this->GetParameterInt("grid.psize") / 2;
-    m_Radius[1] = this->GetParameterInt("grid.psize") / 2;
-    StructuringType se = StructuringType::Box(m_Radius);
-    m_MorphoFilter = MorphoFilterType::New();
-    m_MorphoFilter->SetKernel(se);
-    m_MorphoFilter->SetInput(m_PadFilter->GetOutput());
-    m_MorphoFilter->SetForegroundValue(1);
-    m_MorphoFilter->SetBackgroundValue(0);
-    m_MorphoFilter->UpdateOutputInformation();
-
-    // Prepare output vector data
-    m_OutVectorDataTrain = VectorDataType::New();
-    m_OutVectorDataValid = VectorDataType::New();
-    m_OutVectorDataTest = VectorDataType::New();
-    m_OutVectorDataTrain->SetProjectionRef(m_MorphoFilter->GetOutput()->GetProjectionRef());
-    m_OutVectorDataValid->SetProjectionRef(m_MorphoFilter->GetOutput()->GetProjectionRef());
-    m_OutVectorDataTest->SetProjectionRef(m_MorphoFilter->GetOutput()->GetProjectionRef());
-
-    if (GetParameterAsString("strategy") == "chessboard")
-    {
-      otbAppLogINFO("Sampling at regular interval in space (\"Chessboard\" like)");
-
-      SampleChessboard();
-
-      if (HasValue("outtest"))
-      {
-        otbAppLogWARNING("The \"outtest\" parameter is unused with the \"chessboard\" sampling strategy.")
-      }
-    }
-    else if (GetParameterAsString("strategy") == "balanced")
-    {
-      otbAppLogINFO("Sampling with balancing strategy");
-
-      SampleBalanced();
-    }
-    else if (GetParameterAsString("strategy") == "split")
-    {
-      otbAppLogINFO("Sampling with split strategy (Train/Validation/test)");
-      float vp = .0;
-      float tp = .0;
-      if (HasValue("outvalid"))
-      {
-        vp = GetParameterFloat("strategy.split.validprop");
-      }
-      if (HasValue("outtest"))
-      {
-        tp = GetParameterFloat("strategy.split.testprop");
-      }
-
-      SampleSplit(GetParameterFloat("strategy.split.trainprop"), vp, tp);
-    }
-    else if (GetParameterAsString("strategy") == "all")
-    {
-      otbAppLogINFO("Sampling all locations (only \"outtrain\" output parameter will be used");
-
-      SampleSplit(1.0, .0, .0);
-
-      if (HasValue("outtest") || HasValue("outvalid"))
-      {
-        otbAppLogWARNING("The \"outvalid\" and \"outtest\" parameters are unused with the \"all\" sampling strategy.")
-      }
-    }
-
-    otbAppLogINFO( "Writing output samples positions");
-
-    SetParameterOutputVectorData("outtrain", m_OutVectorDataTrain);
-    if (HasValue("outvalid") && GetParameterAsString("strategy") != "all")
-    {
-      SetParameterOutputVectorData("outvalid", m_OutVectorDataValid);
-    }
-    if (HasValue("outtest") && GetParameterAsString("strategy") == "split")
-    {
-      SetParameterOutputVectorData("outtest", m_OutVectorDataTest);
-    }
-
-  }
-
-
-  void DoUpdateParameters()
-  {
-  }
-
-private:
-  RadiusType                   m_Radius;
-  IsNoDataFilterType::Pointer  m_NoDataFilter;
-  PadFilterType::Pointer       m_PadFilter;
-  MorphoFilterType::Pointer    m_MorphoFilter;
-  VectorDataType::Pointer      m_OutVectorDataTrain;
-  VectorDataType::Pointer      m_OutVectorDataValid;
-  VectorDataType::Pointer      m_OutVectorDataTest;
-  MaskImageFilterType::Pointer m_MaskImageFilter;
-}; // end of class
-
-} // end namespace wrapper
-} // end namespace otb
-
-OTB_APPLICATION_EXPORT( otb::Wrapper::PatchesSelection )
diff --git a/include/otbTensorflowModelServe.cxx b/include/otbTensorflowModelServe.cxx
deleted file mode 100644
index 47a8c957..00000000
--- a/include/otbTensorflowModelServe.cxx
+++ /dev/null
@@ -1,350 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// Tensorflow SavedModel
-#include "tensorflow/cc/saved_model/loader.h"
-
-// Tensorflow model filter
-#include "otbTensorflowMultisourceModelFilter.h"
-
-// Tensorflow graph load
-#include "otbTensorflowGraphOperations.h"
-
-// Layerstack
-#include "otbTensorflowSource.h"
-
-// Streaming
-#include "otbTensorflowStreamerFilter.h"
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class TensorflowModelServe : public Application
-{
-public:
-  /** Standard class typedefs. */
-  typedef TensorflowModelServe                       Self;
-  typedef Application                                Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(TensorflowModelServe, Application);
-
-  /** Typedefs for tensorflow */
-  typedef otb::TensorflowMultisourceModelFilter<FloatVectorImageType, FloatVectorImageType> TFModelFilterType;
-  typedef otb::TensorflowSource<FloatVectorImageType> InputImageSource;
-
-  /** Typedef for streaming */
-  typedef otb::ImageRegionSquareTileSplitter<FloatVectorImageType::ImageDimension> TileSplitterType;
-  typedef otb::TensorflowStreamerFilter<FloatVectorImageType, FloatVectorImageType> StreamingFilterType;
-
-  /** Typedefs for images */
-  typedef FloatVectorImageType::SizeType SizeType;
-
-  //
-  // Store stuff related to one source
-  //
-  struct ProcessObjectsBundle
-  {
-    InputImageSource m_ImageSource;
-    SizeType         m_PatchSize;
-    std::string      m_Placeholder;
-
-    // Parameters keys
-    std::string m_KeyIn;     // Key of input image list
-    std::string m_KeyPszX;   // Key for samples sizes X
-    std::string m_KeyPszY;   // Key for samples sizes Y
-    std::string m_KeyPHName; // Key for placeholder name in the tensorflow model
-  };
-
-  //
-  // Add an input source, which includes:
-  // -an input image list
-  // -an input patchsize (dimensions of samples)
-  //
-  void AddAnInputImage()
-  {
-    // Number of source
-    unsigned int inputNumber = m_Bundles.size() + 1;
-
-    // Create keys and descriptions
-    std::stringstream ss_key_group, ss_desc_group,
-    ss_key_in, ss_desc_in,
-    ss_key_dims_x, ss_desc_dims_x,
-    ss_key_dims_y, ss_desc_dims_y,
-    ss_key_ph, ss_desc_ph;
-
-    // Parameter group key/description
-    ss_key_group  << "source"                  << inputNumber;
-    ss_desc_group << "Parameters for source #" << inputNumber;
-
-    // Parameter group keys
-    ss_key_in      << ss_key_group.str() << ".il";
-    ss_key_dims_x  << ss_key_group.str() << ".rfieldx";
-    ss_key_dims_y  << ss_key_group.str() << ".rfieldy";
-    ss_key_ph      << ss_key_group.str() << ".placeholder";
-
-    // Parameter group descriptions
-    ss_desc_in     << "Input image (or list to stack) for source #" << inputNumber;
-    ss_desc_dims_x << "Input receptive field (width) for source #"  << inputNumber;
-    ss_desc_dims_y << "Input receptive field (height) for source #" << inputNumber;
-    ss_desc_ph     << "Name of the input placeholder for source #"  << inputNumber;
-
-    // Populate group
-    AddParameter(ParameterType_Group,          ss_key_group.str(),  ss_desc_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_in.str(),     ss_desc_in.str() );
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(), ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(), 1);
-    SetDefaultParameterInt                    (ss_key_dims_x.str(), 1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(), ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(), 1);
-    SetDefaultParameterInt                    (ss_key_dims_y.str(), 1);
-    AddParameter(ParameterType_String,         ss_key_ph.str(),     ss_desc_ph.str());
-    MandatoryOff                              (ss_key_ph.str());
-
-    // Add a new bundle
-    ProcessObjectsBundle bundle;
-    bundle.m_KeyIn     = ss_key_in.str();
-    bundle.m_KeyPszX   = ss_key_dims_x.str();
-    bundle.m_KeyPszY   = ss_key_dims_y.str();
-    bundle.m_KeyPHName = ss_key_ph.str();
-
-    m_Bundles.push_back(bundle);
-
-  }
-
-  void DoInit()
-  {
-
-    // Documentation
-    SetName("TensorflowModelServe");
-    SetDescription("Multisource deep learning classifier using TensorFlow. Change the "
-        + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of sources.");
-    SetDocLongDescription("The application run a TensorFlow model over multiple data sources. "
-        "The number of input sources can be changed at runtime by setting the system "
-        "environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". For each source, you have to "
-        "set (1) the placeholder name, as named in the TensorFlow model, (2) the receptive "
-        "field and (3) the image(s) source. The output is a multiband image, stacking all "
-        "outputs tensors together: you have to specify (1) the names of the output tensors, as "
-        "named in the TensorFlow model (typically, an operator's output) and (2) the expression "
-        "field of each output tensor. The output tensors values will be stacked in the same "
-        "order as they appear in the \"model.output\" parameter (you can use a space separator "
-        "between names). You might consider to use extended filename to bypass the automatic "
-        "memory footprint calculator of the otb application engine, and set a good splitting "
-        "strategy (Square tiles is good for convolutional networks) or use the \"optim\" "
-        "parameter group to impose your squared tiles sizes");
-    SetDocAuthors("Remi Cresson");
-
-    AddDocTag(Tags::Learning);
-
-    // Input/output images
-    AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() ; i++)
-      AddAnInputImage();
-
-    // Input model
-    AddParameter(ParameterType_Group,         "model",           "model parameters");
-    AddParameter(ParameterType_Directory,     "model.dir",       "TensorFlow SavedModel directory");
-    MandatoryOn                              ("model.dir");
-    SetParameterDescription                  ("model.dir", "The model directory should contains the model Google Protobuf (.pb) and variables");
-
-    AddParameter(ParameterType_StringList,    "model.userplaceholders",    "Additional single-valued placeholders. Supported types: int, float, bool.");
-    MandatoryOff                             ("model.userplaceholders");
-    SetParameterDescription                  ("model.userplaceholders", "Syntax to use is \"placeholder_1=value_1 ... placeholder_N=value_N\"");
-    AddParameter(ParameterType_Bool,          "model.fullyconv", "Fully convolutional");
-    MandatoryOff                             ("model.fullyconv");
-    AddParameter(ParameterType_StringList,    "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff                             ("model.tagsets");
-
-    // Output tensors parameters
-    AddParameter(ParameterType_Group,         "output",          "Output tensors parameters");
-    AddParameter(ParameterType_Float,         "output.spcscale", "The output spacing scale, related to the first input");
-    SetDefaultParameterFloat                 ("output.spcscale", 1.0);
-    SetParameterDescription                  ("output.spcscale", "The output image size/scale and spacing*scale where size and spacing corresponds to the first input");
-    AddParameter(ParameterType_StringList,    "output.names",    "Names of the output tensors");
-    MandatoryOff                            ("output.names");
-
-    // Output Field of Expression
-    AddParameter(ParameterType_Int,           "output.efieldx", "The output expression field (width)");
-    SetMinimumParameterIntValue              ("output.efieldx", 1);
-    SetDefaultParameterInt                   ("output.efieldx", 1);
-    MandatoryOn                              ("output.efieldx");
-    AddParameter(ParameterType_Int,           "output.efieldy", "The output expression field (height)");
-    SetMinimumParameterIntValue              ("output.efieldy", 1);
-    SetDefaultParameterInt                   ("output.efieldy", 1);
-    MandatoryOn                              ("output.efieldy");
-
-    // Fine tuning
-    AddParameter(ParameterType_Group,         "optim" , "This group of parameters allows optimization of processing time");
-    AddParameter(ParameterType_Bool,          "optim.disabletiling", "Disable tiling");
-    MandatoryOff                             ("optim.disabletiling");
-    SetParameterDescription                  ("optim.disabletiling", "Tiling avoids to process a too large subset of image, but sometimes it can be useful to disable it");
-    AddParameter(ParameterType_Int,           "optim.tilesizex", "Tile width used to stream the filter output");
-    SetMinimumParameterIntValue              ("optim.tilesizex", 1);
-    SetDefaultParameterInt                   ("optim.tilesizex", 16);
-    AddParameter(ParameterType_Int,           "optim.tilesizey", "Tile height used to stream the filter output");
-    SetMinimumParameterIntValue              ("optim.tilesizey", 1);
-    SetDefaultParameterInt                   ("optim.tilesizey", 16);
-
-    // Output image
-    AddParameter(ParameterType_OutputImage, "out", "output image");
-
-    // Example
-    SetDocExampleParameterValue("source1.il",             "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder",    "x1");
-    SetDocExampleParameterValue("source1.rfieldx",        "16");
-    SetDocExampleParameterValue("source1.rfieldy",        "16");
-    SetDocExampleParameterValue("model.dir",              "/tmp/my_saved_model/");
-    SetDocExampleParameterValue("model.userplaceholders", "is_training=false dropout=0.0");
-    SetDocExampleParameterValue("output.names",           "out_predict1 out_proba1");
-    SetDocExampleParameterValue("out",                    "\"classif128tgt.tif?&streaming:type=tiled&streaming:sizemode=height&streaming:sizevalue=256\"");
-
-  }
-
-  //
-  // Prepare bundles from the number of points
-  //
-  void PrepareInputs()
-  {
-
-    for (auto& bundle: m_Bundles)
-    {
-      // Setting the image source
-      FloatVectorImageListType::Pointer list = GetParameterImageList(bundle.m_KeyIn);
-      bundle.m_ImageSource.Set(list);
-      bundle.m_Placeholder = GetParameterAsString(bundle.m_KeyPHName);
-      bundle.m_PatchSize[0] = GetParameterInt(bundle.m_KeyPszX);
-      bundle.m_PatchSize[1] = GetParameterInt(bundle.m_KeyPszY);
-
-      otbAppLogINFO("Source info :");
-      otbAppLogINFO("Receptive field  : " << bundle.m_PatchSize  );
-      otbAppLogINFO("Placeholder name : " << bundle.m_Placeholder);
-    }
-  }
-
-  void DoExecute()
-  {
-
-    // Load the Tensorflow bundle
-    tf::LoadModel(GetParameterAsString("model.dir"), m_SavedModel, GetParameterStringList("model.tagsets"));
-
-    // Prepare inputs
-    PrepareInputs();
-
-    // Setup filter
-    m_TFFilter = TFModelFilterType::New();
-    m_TFFilter->SetSavedModel(&m_SavedModel);
-    m_TFFilter->SetOutputTensors(GetParameterStringList("output.names"));
-    m_TFFilter->SetOutputSpacingScale(GetParameterFloat("output.spcscale"));
-    otbAppLogINFO("Output spacing ratio: " << m_TFFilter->GetOutputSpacingScale());
-
-    // Get user placeholders
-    TFModelFilterType::StringList expressions = GetParameterStringList("model.userplaceholders");
-    TFModelFilterType::DictType dict;
-    for (auto& exp: expressions)
-    {
-      TFModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
-      dict.push_back(entry);
-
-      otbAppLogINFO("Using placeholder " << entry.first << " with " << tf::PrintTensorInfos(entry.second));
-    }
-    m_TFFilter->SetUserPlaceholders(dict);
-
-    // Input sources
-    for (auto& bundle: m_Bundles)
-    {
-      m_TFFilter->PushBackInputTensorBundle(bundle.m_Placeholder, bundle.m_PatchSize, bundle.m_ImageSource.Get());
-    }
-
-    // Fully convolutional mode on/off
-    if (GetParameterInt("model.fullyconv")==1)
-    {
-      otbAppLogINFO("The TensorFlow model is used in fully convolutional mode");
-      m_TFFilter->SetFullyConvolutional(true);
-    }
-
-    // Output field of expression
-    FloatVectorImageType::SizeType foe;
-    foe[0] = GetParameterInt("output.efieldx");
-    foe[1] = GetParameterInt("output.efieldy");
-    m_TFFilter->SetOutputExpressionFields({foe});
-
-    otbAppLogINFO("Output field of expression: " << m_TFFilter->GetOutputExpressionFields()[0]);
-
-    // Streaming
-    if (GetParameterInt("optim.disabletiling") != 1)
-    {
-      // Get the tile size
-      SizeType tileSize;
-      tileSize[0] = GetParameterInt("optim.tilesizex");
-      tileSize[1] = GetParameterInt("optim.tilesizey");
-
-      // Check that the tile size is aligned to the field of expression
-      for (unsigned int i = 0 ; i < FloatVectorImageType::ImageDimension ; i++)
-        if (tileSize[i] % foe[i] != 0)
-          {
-          SizeType::SizeValueType newSize = 1 + std::floor(tileSize[i] / foe[i]);
-          newSize *= foe[i];
-
-          otbAppLogWARNING("Aligning the tiling to the output expression field "
-              << "for better performances (dim " << i << "). New value set to " << newSize)
-
-          tileSize[i] = newSize;
-          }
-
-      otbAppLogINFO("Force tiling with squared tiles of " << tileSize)
-
-      // Force the computation tile by tile
-      m_StreamFilter = StreamingFilterType::New();
-      m_StreamFilter->SetOutputGridSize(tileSize);
-      m_StreamFilter->SetInput(m_TFFilter->GetOutput());
-
-      SetParameterOutputImage("out", m_StreamFilter->GetOutput());
-    }
-    else
-    {
-      otbAppLogINFO("Tiling disabled");
-      SetParameterOutputImage("out", m_TFFilter->GetOutput());
-    }
-  }
-  
-
-  void DoUpdateParameters()
-  {
-  }
-
-private:
-
-  TFModelFilterType::Pointer   m_TFFilter;
-  StreamingFilterType::Pointer m_StreamFilter;
-  tensorflow::SavedModelBundle m_SavedModel; // must be alive during all the execution of the application !
-
-  std::vector<ProcessObjectsBundle> m_Bundles;
-
-}; // end of class
-
-} // namespace wrapper
-} // namespace otb
-
-OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelServe )
diff --git a/include/otbTensorflowModelTrain.cxx b/include/otbTensorflowModelTrain.cxx
deleted file mode 100644
index e7901998..00000000
--- a/include/otbTensorflowModelTrain.cxx
+++ /dev/null
@@ -1,568 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-#include "otbWrapperApplicationFactory.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// Tensorflow SavedModel
-#include "tensorflow/cc/saved_model/loader.h"
-
-// Tensorflow model train
-#include "otbTensorflowMultisourceModelTrain.h"
-#include "otbTensorflowMultisourceModelValidate.h"
-
-// Tensorflow graph load
-#include "otbTensorflowGraphOperations.h"
-
-// Layerstack
-#include "otbTensorflowSource.h"
-
-// Metrics
-#include "otbConfusionMatrixMeasurements.h"
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class TensorflowModelTrain : public Application
-{
-public:
-
-  /** Standard class typedefs. */
-  typedef TensorflowModelTrain                       Self;
-  typedef Application                                Superclass;
-  typedef itk::SmartPointer<Self>                    Pointer;
-  typedef itk::SmartPointer<const Self>              ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(TensorflowModelTrain, Application);
-
-  /** Typedefs for TensorFlow */
-  typedef otb::TensorflowMultisourceModelTrain<FloatVectorImageType>    TrainModelFilterType;
-  typedef otb::TensorflowMultisourceModelValidate<FloatVectorImageType> ValidateModelFilterType;
-  typedef otb::TensorflowSource<FloatVectorImageType>                   TFSource;
-
-  /* Typedefs for evaluation metrics */
-  typedef ValidateModelFilterType::ConfMatType                          ConfMatType;
-  typedef ValidateModelFilterType::MapOfClassesType                     MapOfClassesType;
-  typedef ValidateModelFilterType::LabelValueType                       LabelValueType;
-  typedef otb::ConfusionMatrixMeasurements<ConfMatType, LabelValueType> ConfusionMatrixCalculatorType;
-
-  //
-  // Store stuff related to one source
-  //
-  struct ProcessObjectsBundle
-  {
-    TFSource tfSource;
-    TFSource tfSourceForValidation;
-
-    // Parameters keys
-    std::string m_KeyInForTrain;     // Key of input image list (training)
-    std::string m_KeyInForValid;     // Key of input image list (validation)
-    std::string m_KeyPHNameForTrain; // Key for placeholder name in the TensorFlow model (training)
-    std::string m_KeyPHNameForValid; // Key for placeholder name in the TensorFlow model (validation)
-    std::string m_KeyPszX;   // Key for samples sizes X
-    std::string m_KeyPszY;   // Key for samples sizes Y
-  };
-
-  /** Typedefs for the app */
-  typedef std::vector<ProcessObjectsBundle>           BundleList;
-  typedef std::vector<FloatVectorImageType::SizeType> SizeList;
-  typedef std::vector<std::string>                    StringList;
-
-  void DoUpdateParameters()
-  {
-  }
-
-  //
-  // Add an input source, which includes:
-  // -an input image list        (for training)
-  // -an input image placeholder (for training)
-  // -an input image list        (for validation)
-  // -an input image placeholder (for validation)
-  // -an input patchsize, which is the dimensions of samples. Same for training and validation.
-  //
-  void AddAnInputImage()
-  {
-    // Number of source
-    unsigned int inputNumber = m_Bundles.size() + 1;
-
-    // Create keys and descriptions
-    std::stringstream ss_key_tr_group, ss_desc_tr_group,
-    ss_key_val_group, ss_desc_val_group,
-    ss_key_tr_in, ss_desc_tr_in,
-    ss_key_val_in, ss_desc_val_in,
-    ss_key_dims_x, ss_desc_dims_x,
-    ss_key_dims_y, ss_desc_dims_y,
-    ss_key_tr_ph, ss_desc_tr_ph,
-    ss_key_val_ph, ss_desc_val_ph;
-
-    // Parameter group key/description
-    ss_key_tr_group   << "training.source"         << inputNumber;
-    ss_key_val_group  << "validation.source"       << inputNumber;
-    ss_desc_tr_group  << "Parameters for source #" << inputNumber << " (training)";
-    ss_desc_val_group << "Parameters for source #" << inputNumber << " (validation)";
-
-    // Parameter group keys
-    ss_key_tr_in   << ss_key_tr_group.str()  << ".il";
-    ss_key_val_in  << ss_key_val_group.str() << ".il";
-    ss_key_dims_x  << ss_key_tr_group.str()  << ".patchsizex";
-    ss_key_dims_y  << ss_key_tr_group.str()  << ".patchsizey";
-    ss_key_tr_ph   << ss_key_tr_group.str()  << ".placeholder";
-    ss_key_val_ph  << ss_key_val_group.str() << ".name";
-
-    // Parameter group descriptions
-    ss_desc_tr_in  << "Input image (or list to stack) for source #" << inputNumber << " (training)";
-    ss_desc_val_in << "Input image (or list to stack) for source #" << inputNumber << " (validation)";
-    ss_desc_dims_x << "Patch size (x) for source #"                 << inputNumber;
-    ss_desc_dims_y << "Patch size (y) for source #"                 << inputNumber;
-    ss_desc_tr_ph  << "Name of the input placeholder for source #"  << inputNumber << " (training)";
-    ss_desc_val_ph << "Name of the input placeholder "
-        "or output tensor for source #"                             << inputNumber << " (validation)";
-
-    // Populate group
-    AddParameter(ParameterType_Group,          ss_key_tr_group.str(),  ss_desc_tr_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_tr_in.str(),     ss_desc_tr_in.str() );
-    AddParameter(ParameterType_Int,            ss_key_dims_x.str(),    ss_desc_dims_x.str());
-    SetMinimumParameterIntValue               (ss_key_dims_x.str(),    1);
-    AddParameter(ParameterType_Int,            ss_key_dims_y.str(),    ss_desc_dims_y.str());
-    SetMinimumParameterIntValue               (ss_key_dims_y.str(),    1);
-    AddParameter(ParameterType_String,         ss_key_tr_ph.str(),     ss_desc_tr_ph.str());
-    AddParameter(ParameterType_Group,          ss_key_val_group.str(), ss_desc_val_group.str());
-    AddParameter(ParameterType_InputImageList, ss_key_val_in.str(),    ss_desc_val_in.str() );
-    AddParameter(ParameterType_String,         ss_key_val_ph.str(),    ss_desc_val_ph.str());
-
-    // Add a new bundle
-    ProcessObjectsBundle bundle;
-    bundle.m_KeyInForTrain     = ss_key_tr_in.str();
-    bundle.m_KeyInForValid     = ss_key_val_in.str();
-    bundle.m_KeyPHNameForTrain = ss_key_tr_ph.str();
-    bundle.m_KeyPHNameForValid = ss_key_val_ph.str();
-    bundle.m_KeyPszX           = ss_key_dims_x.str();
-    bundle.m_KeyPszY           = ss_key_dims_y.str();
-
-    m_Bundles.push_back(bundle);
-  }
-
-  void DoInit()
-  {
-
-    // Documentation
-    SetName("TensorflowModelTrain");
-    SetDescription("Train a multisource deep learning net using Tensorflow. Change "
-        "the " + tf::ENV_VAR_NAME_NSOURCES + " environment variable to set the number of "
-        "sources.");
-    SetDocLongDescription("The application trains a Tensorflow model over multiple data sources. "
-        "The number of input sources can be changed at runtime by setting the "
-        "system environment variable " + tf::ENV_VAR_NAME_NSOURCES + ". "
-        "For each source, you have to set (1) the tensor placeholder name, as named in "
-        "the tensorflow model, (2) the patch size and (3) the image(s) source. ");
-    SetDocAuthors("Remi Cresson");
-
-    AddDocTag(Tags::Learning);
-
-    // Input model
-    AddParameter(ParameterType_Group,       "model",              "Model parameters");
-    AddParameter(ParameterType_Directory,   "model.dir",          "Tensorflow model_save directory");
-    MandatoryOn                            ("model.dir");
-    AddParameter(ParameterType_String,      "model.restorefrom",  "Restore model from path");
-    MandatoryOff                           ("model.restorefrom");
-    AddParameter(ParameterType_String,      "model.saveto",       "Save model to path");
-    MandatoryOff                           ("model.saveto");
-    AddParameter(ParameterType_StringList,  "model.tagsets",    "Which tags (i.e. v1.MetaGraphDefs) to load from the saved model. Currently, only one tag is supported. Can be retrieved by running `saved_model_cli  show --dir your_model_dir --all`");
-    MandatoryOff                           ("model.tagsets");
-
-    // Training parameters group
-    AddParameter(ParameterType_Group,       "training",           "Training parameters");
-    AddParameter(ParameterType_Int,         "training.batchsize", "Batch size");
-    SetMinimumParameterIntValue            ("training.batchsize", 1);
-    SetDefaultParameterInt                 ("training.batchsize", 100);
-    AddParameter(ParameterType_Int,         "training.epochs",    "Number of epochs");
-    SetMinimumParameterIntValue            ("training.epochs",    1);
-    SetDefaultParameterInt                 ("training.epochs",    100);
-    AddParameter(ParameterType_StringList,  "training.userplaceholders",
-                 "Additional single-valued placeholders for training. Supported types: int, float, bool.");
-    MandatoryOff                           ("training.userplaceholders");
-    AddParameter(ParameterType_StringList,  "training.targetnodes",    "Names of the target nodes");
-    MandatoryOn                            ("training.targetnodes");
-    AddParameter(ParameterType_StringList,  "training.outputtensors",  "Names of the output tensors to display");
-    MandatoryOff                           ("training.outputtensors");
-    AddParameter(ParameterType_Bool,        "training.usestreaming",   "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff                           ("training.usestreaming");
-
-    // Metrics
-    AddParameter(ParameterType_Group,       "validation",              "Validation parameters");
-    MandatoryOff                           ("validation");
-    AddParameter(ParameterType_Int,         "validation.step",         "Perform the validation every Nth epochs");
-    SetMinimumParameterIntValue            ("validation.step",         1);
-    SetDefaultParameterInt                 ("validation.step",         10);
-    AddParameter(ParameterType_Choice,      "validation.mode",         "Metrics to compute");
-    AddChoice                              ("validation.mode.none",    "No validation step");
-    AddChoice                              ("validation.mode.class",   "Classification metrics");
-    AddChoice                              ("validation.mode.rmse",    "Root mean square error");
-    AddParameter(ParameterType_StringList,  "validation.userplaceholders",
-                 "Additional single-valued placeholders for validation. Supported types: int, float, bool.");
-    MandatoryOff                           ("validation.userplaceholders");
-    AddParameter(ParameterType_Bool,        "validation.usestreaming", "Use the streaming through patches (slower but can process big dataset)");
-    MandatoryOff                           ("validation.usestreaming");
-
-    // Input/output images
-    AddAnInputImage();
-    for (int i = 1; i < tf::GetNumberOfSources() + 1 ; i++) // +1 because we have at least 1 source more for training
-      {
-      AddAnInputImage();
-      }
-
-    // Example
-    SetDocExampleParameterValue("source1.il",                "spot6pms.tif");
-    SetDocExampleParameterValue("source1.placeholder",       "x1");
-    SetDocExampleParameterValue("source1.patchsizex",        "16");
-    SetDocExampleParameterValue("source1.patchsizey",        "16");
-    SetDocExampleParameterValue("source2.il",                "labels.tif");
-    SetDocExampleParameterValue("source2.placeholder",       "y1");
-    SetDocExampleParameterValue("source2.patchsizex",        "1");
-    SetDocExampleParameterValue("source2.patchsizex",        "1");
-    SetDocExampleParameterValue("model.dir",                 "/tmp/my_saved_model/");
-    SetDocExampleParameterValue("training.userplaceholders", "is_training=true dropout=0.2");
-    SetDocExampleParameterValue("training.targetnodes",      "optimizer");
-    SetDocExampleParameterValue("model.saveto",              "/tmp/my_saved_model/variables/variables");
-
-  }
-
-  //
-  // Prepare bundles
-  // Here, we populate the two following groups:
-  // 1.Training :
-  //   -Placeholders
-  //   -PatchSize
-  //   -ImageSource
-  // 2.Learning/Validation
-  //   -Placeholders (if input) or Tensor name (if target)
-  //   -PatchSize (which is the same as for training)
-  //   -ImageSource (depending if it's for learning or validation)
-  //
-  // TODO: a bit of refactoring. We could simply rely on m_Bundles
-  //       if we can keep trace of indices of sources for
-  //       training / test / validation
-  //
-  void PrepareInputs()
-  {
-    // Clear placeholder names
-    m_InputPlaceholdersForTraining.clear();
-    m_InputPlaceholdersForValidation.clear();
-
-    // Clear patches sizes
-    m_InputPatchesSizeForTraining.clear();
-    m_InputPatchesSizeForValidation.clear();
-    m_TargetPatchesSize.clear();
-
-    // Clear bundles
-    m_InputSourcesForTraining.clear();
-    m_InputSourcesForEvaluationAgainstLearningData.clear();
-    m_InputSourcesForEvaluationAgainstValidationData.clear();
-
-    m_TargetTensorsNames.clear();
-    m_InputTargetsForEvaluationAgainstValidationData.clear();
-    m_InputTargetsForEvaluationAgainstLearningData.clear();
-
-
-    // Prepare the bundles
-    for (auto& bundle: m_Bundles)
-      {
-      // Source
-      FloatVectorImageListType::Pointer trainStack = GetParameterImageList(bundle.m_KeyInForTrain);
-      bundle.tfSource.Set(trainStack);
-      m_InputSourcesForTraining.push_back(bundle.tfSource.Get());
-
-      // Placeholder
-      std::string placeholderForTraining = GetParameterAsString(bundle.m_KeyPHNameForTrain);
-      m_InputPlaceholdersForTraining.push_back(placeholderForTraining);
-
-      // Patch size
-      FloatVectorImageType::SizeType patchSize;
-      patchSize[0] = GetParameterInt(bundle.m_KeyPszX);
-      patchSize[1] = GetParameterInt(bundle.m_KeyPszY);
-      m_InputPatchesSizeForTraining.push_back(patchSize);
-
-      otbAppLogINFO("New source:");
-      otbAppLogINFO("Patch size               : "<< patchSize);
-      otbAppLogINFO("Placeholder (training)   : "<< placeholderForTraining);
-
-      // Prepare validation sources
-      if (GetParameterInt("validation.mode") != 0)
-        {
-        // Get the stack
-        if (!HasValue(bundle.m_KeyInForValid))
-          {
-          otbAppLogFATAL("No validation input is set for this source");
-          }
-        FloatVectorImageListType::Pointer validStack = GetParameterImageList(bundle.m_KeyInForValid);
-        bundle.tfSourceForValidation.Set(validStack);
-
-        // We check if the placeholder is the same for training and for validation
-        // If yes, it means that its not an output tensor on which perform the validation
-        std::string placeholderForValidation = GetParameterAsString(bundle.m_KeyPHNameForValid);
-        if (placeholderForValidation.empty())
-          {
-          placeholderForValidation = placeholderForTraining;
-          }
-        // Same placeholder name ==> is a source for validation
-        if (placeholderForValidation.compare(placeholderForTraining) == 0)
-          {
-          // Source
-          m_InputSourcesForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
-          m_InputSourcesForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
-
-          // Placeholder
-          m_InputPlaceholdersForValidation.push_back(placeholderForValidation);
-
-          // Patch size
-          m_InputPatchesSizeForValidation.push_back(patchSize);
-
-          otbAppLogINFO("Placeholder (validation) : "<< placeholderForValidation);
-
-          }
-        // Different placeholder ==> is a target to validate
-        else
-          {
-          // Source
-          m_InputTargetsForEvaluationAgainstValidationData.push_back(bundle.tfSourceForValidation.Get());
-          m_InputTargetsForEvaluationAgainstLearningData.push_back(bundle.tfSource.Get());
-
-          // Placeholder
-          m_TargetTensorsNames.push_back(placeholderForValidation);
-
-          // Patch size
-          m_TargetPatchesSize.push_back(patchSize);
-
-          otbAppLogINFO("Tensor name (validation) : "<< placeholderForValidation);
-          }
-
-        }
-
-      }
-  }
-
-  //
-  // Get user placeholders
-  //
-  TrainModelFilterType::DictType GetUserPlaceholders(const std::string & key)
-  {
-    TrainModelFilterType::DictType dict;
-    TrainModelFilterType::StringList expressions = GetParameterStringList(key);
-    for (auto& exp: expressions)
-      {
-      TrainModelFilterType::DictElementType entry = tf::ExpressionToTensor(exp);
-      dict.push_back(entry);
-
-      otbAppLogINFO("Using placeholder " << entry.first << " with " << tf::PrintTensorInfos(entry.second));
-      }
-    return dict;
-  }
-
-  //
-  // Print some classification metrics
-  //
-  void PrintClassificationMetrics(const ConfMatType & confMat, const MapOfClassesType & mapOfClassesRef)
-  {
-    ConfusionMatrixCalculatorType::Pointer confMatMeasurements = ConfusionMatrixCalculatorType::New();
-    confMatMeasurements->SetConfusionMatrix(confMat);
-    confMatMeasurements->SetMapOfClasses(mapOfClassesRef);
-    confMatMeasurements->Compute();
-
-    for (auto const& itMapOfClassesRef : mapOfClassesRef)
-      {
-      LabelValueType labelRef = itMapOfClassesRef.first;
-      LabelValueType indexLabelRef = itMapOfClassesRef.second;
-
-      otbAppLogINFO("Precision of class [" << labelRef << "] vs all: " << confMatMeasurements->GetPrecisions()[indexLabelRef]);
-      otbAppLogINFO("Recall of class [" << labelRef << "] vs all: " << confMatMeasurements->GetRecalls()[indexLabelRef]);
-      otbAppLogINFO("F-score of class [" << labelRef << "] vs all: " << confMatMeasurements->GetFScores()[indexLabelRef]);
-      otbAppLogINFO("\t");
-      }
-    otbAppLogINFO("Precision of the different classes: " << confMatMeasurements->GetPrecisions());
-    otbAppLogINFO("Recall of the different classes: " << confMatMeasurements->GetRecalls());
-    otbAppLogINFO("F-score of the different classes: " << confMatMeasurements->GetFScores());
-    otbAppLogINFO("\t");
-    otbAppLogINFO("Kappa index: " << confMatMeasurements->GetKappaIndex());
-    otbAppLogINFO("Overall accuracy index: " << confMatMeasurements->GetOverallAccuracy());
-    otbAppLogINFO("Confusion matrix:\n" << confMat);
-  }
-
-  void DoExecute()
-  {
-
-    // Load the Tensorflow bundle
-    tf::LoadModel(GetParameterAsString("model.dir"), m_SavedModel, GetParameterStringList("model.tagsets"));
-
-    // Check if we have to restore variables from somewhere else
-    if (HasValue("model.restorefrom"))
-      {
-      const std::string path = GetParameterAsString("model.restorefrom");
-      otbAppLogINFO("Restoring model from " + path);
-
-      // Load SavedModel variables
-      tf::RestoreModel(path, m_SavedModel);
-      }
-
-    // Prepare inputs
-    PrepareInputs();
-
-    // Setup training filter
-    m_TrainModelFilter = TrainModelFilterType::New();
-    m_TrainModelFilter->SetSavedModel(&m_SavedModel);
-    m_TrainModelFilter->SetOutputTensors(GetParameterStringList("training.outputtensors"));
-    m_TrainModelFilter->SetTargetNodesNames(GetParameterStringList("training.targetnodes"));
-    m_TrainModelFilter->SetBatchSize(GetParameterInt("training.batchsize"));
-    m_TrainModelFilter->SetUserPlaceholders(GetUserPlaceholders("training.userplaceholders"));
-    m_TrainModelFilter->SetUseStreaming(GetParameterInt("training.usestreaming"));
-
-    // Set inputs
-    for (unsigned int i = 0 ; i < m_InputSourcesForTraining.size() ; i++)
-      {
-      m_TrainModelFilter->PushBackInputTensorBundle(
-          m_InputPlaceholdersForTraining[i],
-          m_InputPatchesSizeForTraining[i],
-          m_InputSourcesForTraining[i]);
-      }
-
-    // Setup the validation filter
-    const bool do_validation = HasUserValue("validation.mode");
-    if (GetParameterInt("validation.mode")==1) // class
-      {
-      otbAppLogINFO("Set validation mode to classification validation");
-
-      m_ValidateModelFilter = ValidateModelFilterType::New();
-      m_ValidateModelFilter->SetSavedModel(&m_SavedModel);
-      m_ValidateModelFilter->SetBatchSize(GetParameterInt("training.batchsize"));
-      m_ValidateModelFilter->SetUserPlaceholders(GetUserPlaceholders("validation.userplaceholders"));
-      m_ValidateModelFilter->SetInputPlaceholders(m_InputPlaceholdersForValidation);
-      m_ValidateModelFilter->SetInputReceptiveFields(m_InputPatchesSizeForValidation);
-      m_ValidateModelFilter->SetOutputTensors(m_TargetTensorsNames);
-      m_ValidateModelFilter->SetOutputExpressionFields(m_TargetPatchesSize);
-      }
-    else if (GetParameterInt("validation.mode")==2) // rmse)
-      {
-      otbAppLogINFO("Set validation mode to classification RMSE evaluation");
-      otbAppLogFATAL("Not implemented yet !"); // XD
-
-      // TODO
-      }
-
-    // Epoch
-    for (int epoch = 1 ; epoch <= GetParameterInt("training.epochs") ; epoch++)
-      {
-      // Train the model
-      AddProcess(m_TrainModelFilter, "Training epoch #" + std::to_string(epoch));
-      m_TrainModelFilter->Update();
-
-      if (do_validation)
-      {
-        // Validate the model
-        if (epoch % GetParameterInt("validation.step") == 0)
-        {
-          // 1. Evaluate the metrics against the learning data
-
-          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstLearningData.size() ; i++)
-          {
-            m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstLearningData[i]);
-          }
-          m_ValidateModelFilter->SetInputReferences(m_InputTargetsForEvaluationAgainstLearningData);
-
-          // As we use the learning data here, it's rational to use the same option as streaming during training
-          m_ValidateModelFilter->SetUseStreaming(GetParameterInt("training.usestreaming"));
-
-          // Update
-          AddProcess(m_ValidateModelFilter, "Evaluate model (Learning data)");
-          m_ValidateModelFilter->Update();
-
-          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
-          {
-            otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
-          }
-
-          // 2. Evaluate the metrics against the validation data
-
-          // Here we just change the input sources and references
-          for (unsigned int i = 0 ; i < m_InputSourcesForEvaluationAgainstValidationData.size() ; i++)
-          {
-            m_ValidateModelFilter->SetInput(i, m_InputSourcesForEvaluationAgainstValidationData[i]);
-          }
-          m_ValidateModelFilter->SetInputReferences(m_InputTargetsForEvaluationAgainstValidationData);
-          m_ValidateModelFilter->SetUseStreaming(GetParameterInt("validation.usestreaming"));
-
-          // Update
-          AddProcess(m_ValidateModelFilter, "Evaluate model (Validation data)");
-          m_ValidateModelFilter->Update();
-
-          for (unsigned int i = 0 ; i < m_TargetTensorsNames.size() ; i++)
-          {
-            otbAppLogINFO("Metrics for target \"" << m_TargetTensorsNames[i] << "\":");
-            PrintClassificationMetrics(m_ValidateModelFilter->GetConfusionMatrix(i), m_ValidateModelFilter->GetMapOfClasses(i));
-          }
-        } // Step is OK to perform validation
-      } // Do the validation against the validation data
-
-      } // Next epoch
-
-    // Check if we have to save variables to somewhere
-    if (HasValue("model.saveto"))
-      {
-      const std::string path = GetParameterAsString("model.saveto");
-      otbAppLogINFO("Saving model to " + path);
-      tf::SaveModel(path, m_SavedModel);
-      }
-
-  }
-
-private:
-
-  tensorflow::SavedModelBundle     m_SavedModel; // must be alive during all the execution of the application !
-
-  // Filters
-  TrainModelFilterType::Pointer    m_TrainModelFilter;
-  ValidateModelFilterType::Pointer m_ValidateModelFilter;
-
-  // Inputs
-  BundleList m_Bundles;
-
-  // Patches size
-  SizeList   m_InputPatchesSizeForTraining;
-  SizeList   m_InputPatchesSizeForValidation;
-  SizeList   m_TargetPatchesSize;
-
-  // Placeholders and Tensors names
-  StringList m_InputPlaceholdersForTraining;
-  StringList m_InputPlaceholdersForValidation;
-  StringList m_TargetTensorsNames;
-
-  // Image sources
-  std::vector<FloatVectorImageType::Pointer> m_InputSourcesForTraining;
-  std::vector<FloatVectorImageType::Pointer> m_InputSourcesForEvaluationAgainstLearningData;
-  std::vector<FloatVectorImageType::Pointer> m_InputSourcesForEvaluationAgainstValidationData;
-  std::vector<FloatVectorImageType::Pointer> m_InputTargetsForEvaluationAgainstLearningData;
-  std::vector<FloatVectorImageType::Pointer> m_InputTargetsForEvaluationAgainstValidationData;
-
-}; // end of class
-
-} // namespace wrapper
-} // namespace otb
-
-OTB_APPLICATION_EXPORT( otb::Wrapper::TensorflowModelTrain )
diff --git a/include/otbTrainClassifierFromDeepFeatures.cxx b/include/otbTrainClassifierFromDeepFeatures.cxx
deleted file mode 100644
index 39ac4189..00000000
--- a/include/otbTrainClassifierFromDeepFeatures.cxx
+++ /dev/null
@@ -1,130 +0,0 @@
-/*=========================================================================
-
-     Copyright (c) 2018-2019 IRSTEA
-     Copyright (c) 2020-2021 INRAE
-
-
-     This software is distributed WITHOUT ANY WARRANTY; without even
-     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
-     PURPOSE.  See the above copyright notices for more information.
-
-=========================================================================*/
-#include "itkFixedArray.h"
-#include "itkObjectFactory.h"
-
-// Elevation handler
-#include "otbWrapperElevationParametersHandler.h"
-#include "otbWrapperApplicationFactory.h"
-#include "otbWrapperCompositeApplication.h"
-
-// Application engine
-#include "otbStandardFilterWatcher.h"
-#include "itkFixedArray.h"
-
-// TF (used to get the environment variable for the number of inputs)
-#include "otbTensorflowCommon.h"
-
-namespace otb
-{
-
-namespace Wrapper
-{
-
-class TrainClassifierFromDeepFeatures : public CompositeApplication
-{
-public:
-  /** Standard class typedefs. */
-  typedef TrainClassifierFromDeepFeatures              Self;
-  typedef Application                         Superclass;
-  typedef itk::SmartPointer<Self>             Pointer;
-  typedef itk::SmartPointer<const Self>       ConstPointer;
-
-  /** Standard macro */
-  itkNewMacro(Self);
-  itkTypeMacro(TrainClassifierFromDeepFeatures, otb::Wrapper::CompositeApplication);
-
-private:
-
-  //
-  // Add an input source, which includes:
-  // -an input image list
-  // -an input patchsize (dimensions of samples)
-  //
-  void AddAnInputImage(int inputNumber = 0)
-  {
-    inputNumber++;
-
-    // Create keys and descriptions
-    std::stringstream ss_key_group, ss_desc_group;
-    ss_key_group << "source" << inputNumber;
-    ss_desc_group << "Parameters for source " << inputNumber;
-
-    // Populate group
-    ShareParameter(ss_key_group.str(), "tfmodel." + ss_key_group.str(), ss_desc_group.str());
-
-  }
-
-  void DoInit()
-  {
-
-  SetName("TrainClassifierFromDeepFeatures");
-  SetDescription("Train a classifier from deep net based features of an image and training vector data.");
-
-  // Documentation
-  SetDocLongDescription("See TrainImagesClassifier application");
-  SetDocLimitations("None");
-  SetDocAuthors("Remi Cresson");
-  SetDocSeeAlso(" ");
-
-  AddDocTag(Tags::Learning);
-
-  ClearApplications();
-
-  // Add applications
-  AddApplication("TrainImagesClassifier",  "train",   "Train images classifier");
-  AddApplication("TensorflowModelServe",   "tfmodel", "Serve the TF model");
-
-  // Model shared parameters
-  AddAnInputImage();
-  for (int i = 1; i < tf::GetNumberOfSources() ; i++)
-  {
-    AddAnInputImage(i);
-  }
-  ShareParameter("model",      "tfmodel.model",       "Deep net inputs parameters",   "Parameters of the deep net inputs: placeholder names, receptive fields, etc.");
-  ShareParameter("output",     "tfmodel.output",      "Deep net outputs parameters",  "Parameters of the deep net outputs: tensors names, expression fields, etc.");
-  ShareParameter("optim",      "tfmodel.optim",       "Processing time optimization", "This group of parameters allows optimization of processing time");
-
-  // Train shared parameters
-  ShareParameter("ram",        "train.ram",           "Available RAM (Mb)",           "Available RAM (Mb)");
-  ShareParameter("vd",         "train.io.vd",         "Vector data for training",     "Input vector data for training");
-  ShareParameter("valid",      "train.io.valid",      "Vector data for validation",   "Input vector data for validation");
-  ShareParameter("out",        "train.io.out",        "Output classification model",  "Output classification model");
-  ShareParameter("confmatout", "train.io.confmatout", "Output confusion matrix",      "Output confusion matrix of the classification model");
-
-  // Shared parameter groups
-  ShareParameter("sample",     "train.sample",        "Sampling parameters" ,         "Training and validation samples parameters" );
-  ShareParameter("elev",       "train.elev",          "Elevation parameters",         "Elevation parameters" );
-  ShareParameter("classifier", "train.classifier",    "Classifier parameters",        "Classifier parameters" );
-  ShareParameter("rand",       "train.rand",          "User defined random seed",     "User defined random seed" );
-
-  }
-
-
-  void DoUpdateParameters()
-  {
-    UpdateInternalParameters("train");
-  }
-
-  void DoExecute()
-  {
-    ExecuteInternal("tfmodel");
-    GetInternalApplication("train")->AddImageToParameterInputImageList("io.il", GetInternalApplication("tfmodel")->GetParameterOutputImage("out"));
-    UpdateInternalParameters("train");
-    ExecuteInternal("train");
-  }
-
-};
-} // namespace Wrapper
-} // namespace otb
-
-OTB_APPLICATION_EXPORT( otb::Wrapper::TrainClassifierFromDeepFeatures )
-- 
GitLab


From f243345778fac271652508fe7118bc027ebe905d Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Mon, 6 Jun 2022 09:51:59 +0200
Subject: [PATCH 08/12] CI: bump otbtf version

---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 7f713d76..a0efc1a8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,7 +1,7 @@
 image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
 
 variables:
-  OTBTF_VERSION: 3.2.1
+  OTBTF_VERSION: 3.3
   OTB_BUILD: /src/otb/build/OTB/build  # Local OTB build directory
   OTBTF_SRC: /src/otbtf  # Local OTBTF source directory
   OTB_TEST_DIR: $OTB_BUILD/Testing/Temporary  # OTB testing directory
-- 
GitLab


From 5ba46eb1d7ad9d75be28fc1a2a58a0bc9975395b Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Mon, 6 Jun 2022 09:52:14 +0200
Subject: [PATCH 09/12] COMP: bump OTB version

---
 Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index da634cea..9c905bd1 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -85,7 +85,7 @@ RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.gi
 
 ### OTB
 ARG GUI=false
-ARG OTB=7.4.0
+ARG OTB=8.0.1
 ARG OTBTESTS=false
 
 RUN mkdir /src/otb
-- 
GitLab


From f337926649434e1fe1d72ebf1b54204ae5d5c0a2 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Mon, 6 Jun 2022 09:55:29 +0200
Subject: [PATCH 10/12] CI: bump otbtf version

---
 .gitlab-ci.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a0efc1a8..7f713d76 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,7 +1,7 @@
 image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME
 
 variables:
-  OTBTF_VERSION: 3.3
+  OTBTF_VERSION: 3.2.1
   OTB_BUILD: /src/otb/build/OTB/build  # Local OTB build directory
   OTBTF_SRC: /src/otbtf  # Local OTBTF source directory
   OTB_TEST_DIR: $OTB_BUILD/Testing/Temporary  # OTB testing directory
-- 
GitLab


From dfdddd71627d96b15807533fac5a1a18df7ccff5 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@irstea.fr>
Date: Mon, 6 Jun 2022 15:44:01 +0200
Subject: [PATCH 11/12] COMP: move IsNoData() inside otb::wrapper::

---
 app/otbPatchesSelection.cxx | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/app/otbPatchesSelection.cxx b/app/otbPatchesSelection.cxx
index 68d76221..3437849b 100644
--- a/app/otbPatchesSelection.cxx
+++ b/app/otbPatchesSelection.cxx
@@ -35,6 +35,12 @@
 #include <random>
 #include <limits>
 
+namespace otb
+{
+
+namespace Wrapper
+{
+
 // Functor to retrieve nodata
 template<class TPixel, class OutputPixel>
 class IsNoData
@@ -62,12 +68,6 @@ private:
   typename TPixel::ValueType m_NoDataValue;
 };
 
-namespace otb
-{
-
-namespace Wrapper
-{
-
 class PatchesSelection : public Application
 {
 public:
-- 
GitLab


From 5bcc22cda33db7433d66f5d63ae6d9f6a4bc07c2 Mon Sep 17 00:00:00 2001
From: Remi Cresson <remi.cresson@inrae.fr>
Date: Tue, 19 Jul 2022 11:47:24 +0200
Subject: [PATCH 12/12] COMP: use SuperBuild GDAL

---
 Dockerfile                       |  4 ++--
 app/otbPatchesSelection.cxx      | 11 ++++++-----
 tools/docker/build-deps-cli.txt  |  7 -------
 tools/docker/build-flags-otb.txt |  4 ++--
 4 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index da634cea..990c55f5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -85,7 +85,7 @@ RUN git clone --single-branch -b $TF https://github.com/tensorflow/tensorflow.gi
 
 ### OTB
 ARG GUI=false
-ARG OTB=7.4.0
+ARG OTB=8.0.1
 ARG OTBTESTS=false
 
 RUN mkdir /src/otb
@@ -149,7 +149,7 @@ COPY --from=builder /src /src
 # System-wide ENV
 ENV PATH="/opt/otbtf/bin:$PATH"
 ENV LD_LIBRARY_PATH="/opt/otbtf/lib:$LD_LIBRARY_PATH"
-ENV PYTHONPATH="/opt/otbtf/lib/python3/site-packages:/opt/otbtf/lib/otb/python:/src/otbtf"
+ENV PYTHONPATH="/opt/otbtf/lib/python3/site-packages:/opt/otbtf/lib/python3/dist-packages:/opt/otbtf/lib/otb/python:/src/otbtf"
 ENV OTB_APPLICATION_PATH="/opt/otbtf/lib/otb/applications"
 
 # Default user, directory and command (bash is the entrypoint when using 'docker create')
diff --git a/app/otbPatchesSelection.cxx b/app/otbPatchesSelection.cxx
index 68d76221..73b0b0de 100644
--- a/app/otbPatchesSelection.cxx
+++ b/app/otbPatchesSelection.cxx
@@ -35,6 +35,12 @@
 #include <random>
 #include <limits>
 
+namespace otb
+{
+
+namespace Wrapper
+{
+
 // Functor to retrieve nodata
 template<class TPixel, class OutputPixel>
 class IsNoData
@@ -62,11 +68,6 @@ private:
   typename TPixel::ValueType m_NoDataValue;
 };
 
-namespace otb
-{
-
-namespace Wrapper
-{
 
 class PatchesSelection : public Application
 {
diff --git a/tools/docker/build-deps-cli.txt b/tools/docker/build-deps-cli.txt
index 5d699cb1..ffd72911 100644
--- a/tools/docker/build-deps-cli.txt
+++ b/tools/docker/build-deps-cli.txt
@@ -25,8 +25,6 @@ wget
 zip
 
 bison
-gdal-bin
-python3-gdal
 libboost-date-time-dev
 libboost-filesystem-dev
 libboost-graph-dev
@@ -36,8 +34,6 @@ libboost-thread-dev
 libcurl4-gnutls-dev
 libexpat1-dev
 libfftw3-dev
-libgdal-dev
-libgeotiff-dev
 libgsl-dev
 libinsighttoolkit4-dev
 libkml-dev
@@ -45,9 +41,6 @@ libmuparser-dev
 libmuparserx-dev
 libopencv-core-dev
 libopencv-ml-dev
-libopenthreads-dev
-libossim-dev
-libpng-dev
 libsvm-dev
 libtinyxml-dev
 zlib1g-dev
diff --git a/tools/docker/build-flags-otb.txt b/tools/docker/build-flags-otb.txt
index 2c3e0fea..56b0434c 100644
--- a/tools/docker/build-flags-otb.txt
+++ b/tools/docker/build-flags-otb.txt
@@ -3,9 +3,9 @@
 -DUSE_SYSTEM_EXPAT=ON
 -DUSE_SYSTEM_FFTW=ON
 -DUSE_SYSTEM_FREETYPE=ON
--DUSE_SYSTEM_GDAL=ON
+-DUSE_SYSTEM_GDAL=OFF
 -DUSE_SYSTEM_GEOS=ON
--DUSE_SYSTEM_GEOTIFF=ON
+-DUSE_SYSTEM_GEOTIFF=OFF
 -DUSE_SYSTEM_GLEW=ON
 -DUSE_SYSTEM_GLFW=ON
 -DUSE_SYSTEM_GLUT=ON
-- 
GitLab