changeset 454:f4244c7e2725 improve-delete

merge pg-transactions -> improve-delete
author Alain Mazy <am@osimis.io>
date Mon, 22 Jan 2024 10:00:28 +0100
parents 95c413106a66 (current diff) 0a8b34e3a337 (diff)
children d0dd95ff0662
files MySQL/CMakeLists.txt MySQL/Plugins/MySQLIndex.cpp TODO
diffstat 49 files changed, 2088 insertions(+), 669 deletions(-)
--- a/Framework/Common/DatabaseManager.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Common/DatabaseManager.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -492,8 +492,8 @@
           return dynamic_cast<const Integer64Value&>(value).GetValue();
 
         default:
-          //LOG(ERROR) << value.Format();
-          throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
+          // LOG(ERROR) << value.GetType();
+          throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError, "The returned field is not of the correct type (Integer64)");
       }
     }
   }
@@ -511,8 +511,7 @@
 
       if (value != static_cast<int64_t>(static_cast<int32_t>(value)))
       {
-        LOG(ERROR) << "Integer overflow";
-        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
+        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError, "Integer overflow");
       }
       else
       {
@@ -521,7 +520,18 @@
     }
   }
 
-    
+  bool DatabaseManager::StatementBase::IsNull(size_t field) const
+  {
+    if (IsDone())
+    {
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_Database);
+    }
+    else
+    {
+      return GetResultField(field).GetType() == ValueType_Null;
+    }
+  }
+
   std::string DatabaseManager::StatementBase::ReadString(size_t field) const
   {
     const IValue& value = GetResultField(field);
@@ -535,8 +545,7 @@
         return dynamic_cast<const Utf8StringValue&>(value).GetContent();
 
       default:
-        //LOG(ERROR) << value.Format();
-        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);
+        throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError, "The returned field is not of the correct type (String)");
     }
   }
   
@@ -560,8 +569,7 @@
     }
   }
 
-      
-  void DatabaseManager::CachedStatement::Execute(const Dictionary& parameters)
+  void DatabaseManager::CachedStatement::ExecuteInternal(const Dictionary& parameters, bool withResults)
   {
     try
     {
@@ -587,7 +595,14 @@
         #endif
       */
 
-      SetResult(GetTransaction().Execute(*statement_, parameters));
+      if (withResults)
+      {
+        SetResult(GetTransaction().Execute(*statement_, parameters));
+      }
+      else
+      {
+        GetTransaction().ExecuteWithoutResult(*statement_, parameters);
+      }
     }
     catch (Orthanc::OrthancException& e)
     {
@@ -595,7 +610,18 @@
       throw;
     }
   }
+
+      
+  void DatabaseManager::CachedStatement::Execute(const Dictionary& parameters)
+  {
+    ExecuteInternal(parameters, true);
+  }
   
+  void DatabaseManager::CachedStatement::ExecuteWithoutResult(const Dictionary& parameters)
+  {
+    ExecuteInternal(parameters, false);
+  }
+
   
   DatabaseManager::StandaloneStatement::StandaloneStatement(DatabaseManager& manager,
                                                             const std::string& sql) :
@@ -615,6 +641,16 @@
 
   void DatabaseManager::StandaloneStatement::Execute(const Dictionary& parameters)
   {
+    ExecuteInternal(parameters, true);
+  }
+
+  void DatabaseManager::StandaloneStatement::ExecuteWithoutResult(const Dictionary& parameters)
+  {
+    ExecuteInternal(parameters, false);
+  }
+
+  void DatabaseManager::StandaloneStatement::ExecuteInternal(const Dictionary& parameters, bool withResults)
+  {
     try
     {
       std::unique_ptr<Query> query(ReleaseQuery());
@@ -626,7 +662,14 @@
       statement_.reset(GetManager().GetDatabase().Compile(*query));
       assert(statement_.get() != NULL);
 
-      SetResult(GetTransaction().Execute(*statement_, parameters));
+      if (withResults)
+      {
+        SetResult(GetTransaction().Execute(*statement_, parameters));
+      }
+      else
+      {
+        GetTransaction().Execute(*statement_, parameters);
+      }
     }
     catch (Orthanc::OrthancException& e)
     {
--- a/Framework/Common/DatabaseManager.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Common/DatabaseManager.h	Mon Jan 22 10:00:28 2024 +0100
@@ -187,6 +187,8 @@
 
       std::string ReadString(size_t field) const;
 
+      bool IsNull(size_t field) const;
+
       void PrintResult(std::ostream& stream)
       {
         IResult::Print(stream, GetResult());
@@ -219,6 +221,17 @@
       }
 
       void Execute(const Dictionary& parameters);
+
+      void ExecuteWithoutResult()
+      {
+        Dictionary parameters;
+        ExecuteWithoutResult(parameters);
+      }
+
+      void ExecuteWithoutResult(const Dictionary& parameters);
+
+    private:
+      void ExecuteInternal(const Dictionary& parameters, bool withResults);
     };
 
 
@@ -240,6 +253,17 @@
       }
 
       void Execute(const Dictionary& parameters);
+
+      void ExecuteWithoutResult()
+      {
+        Dictionary parameters;
+        ExecuteWithoutResult(parameters);
+      }
+
+      void ExecuteWithoutResult(const Dictionary& parameters);
+
+    private:
+      void ExecuteInternal(const Dictionary& parameters, bool withResults);
     };
   };
 }
--- a/Framework/Common/ResultBase.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Common/ResultBase.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -56,11 +56,6 @@
       
     for (size_t i = 0; i < fields_.size(); i++)
     {
-      if (fields_[i] == NULL)
-      {
-        throw Orthanc::OrthancException(Orthanc::ErrorCode_NullPointer);
-      }
-
       ValueType sourceType = fields_[i]->GetType();
       ValueType targetType = expectedType_[i];
 
@@ -95,11 +90,11 @@
       for (size_t i = 0; i < fields_.size(); i++)
       {
         fields_[i] = FetchField(i);
+      }
 
-        if (fields_[i] == NULL)
-        {
-          throw Orthanc::OrthancException(Orthanc::ErrorCode_NullPointer);
-        }
+      if (fields_.size() == 1 && fields_[0] == NULL)  // this is a "void" result
+      {
+        return;
       }
 
       ConvertFields();
--- a/Framework/Plugins/DatabaseBackendAdapterV4.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/DatabaseBackendAdapterV4.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -431,6 +431,9 @@
         response.mutable_get_system_information()->set_supports_flush_to_disk(false);
         response.mutable_get_system_information()->set_supports_revisions(accessor.GetBackend().HasRevisionsSupport());
         response.mutable_get_system_information()->set_supports_labels(accessor.GetBackend().HasLabelsSupport());
+        response.mutable_get_system_information()->set_supports_increment_global_property(accessor.GetBackend().HasAtomicIncrementGlobalProperty());
+        response.mutable_get_system_information()->set_has_update_and_get_statistics(accessor.GetBackend().HasUpdateAndGetStatistics());
+        response.mutable_get_system_information()->set_has_measure_latency(accessor.GetBackend().HasMeasureLatency());
         break;
       }
 
@@ -514,7 +517,14 @@
         
         break;
       }
-              
+
+      case Orthanc::DatabasePluginMessages::OPERATION_MEASURE_LATENCY:
+      {
+        IndexConnectionsPool::Accessor accessor(pool);
+        response.mutable_measure_latency()->set_latency_us(accessor.GetBackend().MeasureLatency(accessor.GetManager()));
+        break;
+      }
+
       default:
         LOG(ERROR) << "Not implemented database operation from protobuf: " << request.operation();
         throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
@@ -941,7 +951,30 @@
         
         break;
       }
-      
+
+      case Orthanc::DatabasePluginMessages::OPERATION_UPDATE_AND_GET_STATISTICS:
+      {
+        int64_t patientsCount, studiesCount, seriesCount, instancesCount, compressedSize, uncompressedSize;
+        backend.UpdateAndGetStatistics(manager, patientsCount, studiesCount, seriesCount, instancesCount, compressedSize, uncompressedSize);
+
+        response.mutable_update_and_get_statistics()->set_patients_count(patientsCount);
+        response.mutable_update_and_get_statistics()->set_studies_count(studiesCount);
+        response.mutable_update_and_get_statistics()->set_series_count(seriesCount);
+        response.mutable_update_and_get_statistics()->set_instances_count(instancesCount);
+        response.mutable_update_and_get_statistics()->set_total_compressed_size(compressedSize);
+        response.mutable_update_and_get_statistics()->set_total_uncompressed_size(uncompressedSize);
+        break;
+      }
+
+      case Orthanc::DatabasePluginMessages::OPERATION_INCREMENT_GLOBAL_PROPERTY:
+      {
+        int64_t value = backend.IncrementGlobalProperty(manager, request.increment_global_property().server_id().c_str(),
+                                                        request.increment_global_property().property(),
+                                                        request.increment_global_property().increment());
+        response.mutable_increment_global_property()->set_new_value(value);
+        break;
+      }
+
       case Orthanc::DatabasePluginMessages::OPERATION_LOOKUP_METADATA:
       {
         std::string value;
@@ -1325,7 +1358,14 @@
     }
     catch (::Orthanc::OrthancException& e)
     {
-      LOG(ERROR) << "Exception in database back-end: " << e.What();
+      if (e.GetErrorCode() == ::Orthanc::ErrorCode_DatabaseCannotSerialize)
+      {
+        LOG(WARNING) << "An SQL transaction failed and will likely be retried: " << e.GetDetails();
+      }
+      else
+      {
+        LOG(ERROR) << "Exception in database back-end: " << e.What();
+      }
       return static_cast<OrthancPluginErrorCode>(e.GetErrorCode());
     }
     catch (::std::runtime_error& e)
--- a/Framework/Plugins/GlobalProperties.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/GlobalProperties.h	Mon Jan 22 10:00:28 2024 +0100
@@ -31,7 +31,7 @@
    * The enum "GlobalProperty" is a subset of the "GlobalProperty_XXX"
    * values from the Orthanc server that have a special meaning to the
    * database plugins:
-   * https://hg.orthanc-server.com/orthanc/file/default/OrthancServer/Sources/ServerEnumerations.h
+   * https://orthanc.uclouvain.be/hg/orthanc/file/default/OrthancServer/Sources/ServerEnumerations.h
    *
    * WARNING: The values must be the same between the Orthanc core and
    * this enum!
--- a/Framework/Plugins/IDatabaseBackend.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/IDatabaseBackend.h	Mon Jan 22 10:00:28 2024 +0100
@@ -348,5 +348,34 @@
     // New in Orthanc 1.12.0
     virtual void ListAllLabels(std::list<std::string>& target,
                                DatabaseManager& manager) = 0;
+
+    // New in Orthanc 1.12.X
+    virtual bool HasAtomicIncrementGlobalProperty() = 0;
+
+    // New in Orthanc 1.12.X
+    virtual int64_t IncrementGlobalProperty(DatabaseManager& manager,
+                                            const char* serverIdentifier,
+                                            int32_t property,
+                                            int64_t increment) = 0;
+
+    // New in Orthanc 1.12.X
+    virtual bool HasUpdateAndGetStatistics() = 0;
+
+    // New in Orthanc 1.12.X
+    virtual void UpdateAndGetStatistics(DatabaseManager& manager,
+                                        int64_t& patientsCount,
+                                        int64_t& studiesCount,
+                                        int64_t& seriesCount,
+                                        int64_t& instancesCount,
+                                        int64_t& compressedSize,
+                                        int64_t& uncompressedSize) = 0;
+
+    // New in Orthanc 1.12.X
+    virtual bool HasMeasureLatency() = 0;
+
+    // New in Orthanc 1.12.X
+    virtual uint64_t MeasureLatency(DatabaseManager& manager) = 0;
+
+
   };
 }
--- a/Framework/Plugins/IndexBackend.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/IndexBackend.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -33,6 +33,7 @@
 #include <Compatibility.h>  // For std::unique_ptr<>
 #include <Logging.h>
 #include <OrthancException.h>
+#include <Toolbox.h>
 
 
 namespace OrthancDatabases
@@ -176,6 +177,16 @@
             statement.IsDone());
   }
 
+  void IndexBackend::ClearRemainingAncestor(DatabaseManager& manager)
+  {
+    DatabaseManager::CachedStatement statement(
+      STATEMENT_FROM_HERE, manager,
+      "DELETE FROM RemainingAncestor");
+
+    statement.Execute();
+  }
+
+
 
   void IndexBackend::ClearDeletedFiles(DatabaseManager& manager)
   {
@@ -433,14 +444,7 @@
   {
     ClearDeletedFiles(manager);
     ClearDeletedResources(manager);
-    
-    {
-      DatabaseManager::CachedStatement statement(
-        STATEMENT_FROM_HERE, manager,
-        "DELETE FROM RemainingAncestor");
-
-      statement.Execute();
-    }
+    ClearRemainingAncestor(manager);
       
     {
       DatabaseManager::CachedStatement statement(
@@ -460,7 +464,6 @@
       DatabaseManager::CachedStatement statement(
         STATEMENT_FROM_HERE, manager,
         "SELECT * FROM RemainingAncestor");
-
       statement.Execute();
 
       if (!statement.IsDone())
@@ -473,9 +476,10 @@
         assert((statement.Next(), statement.IsDone()));
       }
     }
-    
+
     SignalDeletedFiles(output, manager);
     SignalDeletedResources(output, manager);
+
   }
 
 
@@ -1204,7 +1208,41 @@
     }
   }
 
-    
+  bool IndexBackend::HasAtomicIncrementGlobalProperty()
+  {
+    return false; // currently only implemented in Postgres
+  }
+
+  int64_t IndexBackend::IncrementGlobalProperty(DatabaseManager& manager,
+                                                const char* serverIdentifier,
+                                                int32_t property,
+                                                int64_t increment)
+  {
+    throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
+  }
+
+  bool IndexBackend::HasUpdateAndGetStatistics()
+  {
+    return false; // currently only implemented in Postgres
+  }
+
+  void IndexBackend::UpdateAndGetStatistics(DatabaseManager& manager,
+                                            int64_t& patientsCount,
+                                            int64_t& studiesCount,
+                                            int64_t& seriesCount,
+                                            int64_t& instancesCount,
+                                            int64_t& compressedSize,
+                                            int64_t& uncompressedSize)
+  {
+    throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
+  }
+
+  bool IndexBackend::HasMeasureLatency()
+  {
+    return true;
+  }
+
+
   void IndexBackend::LookupIdentifier(std::list<int64_t>& target /*out*/,
                                       DatabaseManager& manager,
                                       OrthancPluginResourceType resourceType,
@@ -2036,7 +2074,7 @@
     virtual bool IsEscapeBrackets() const
     {
       // This was initially done at a bad location by the following changeset:
-      // https://hg.orthanc-server.com/orthanc-databases/rev/389c037387ea
+      // https://orthanc.uclouvain.be/hg/orthanc-databases/rev/389c037387ea
       return (dialect_ == Dialect_MSSQL);
     }
 
@@ -2839,6 +2877,28 @@
   }
 
 
+  uint64_t IndexBackend::MeasureLatency(DatabaseManager& manager)
+  {
+    // execute 11x the simplest statement and return the median value
+    std::vector<uint64_t> measures;
+
+    for (int i = 0; i < 11; i++)
+    {
+      DatabaseManager::StandaloneStatement statement(manager, "SELECT 1");
+
+      Orthanc::Toolbox::ElapsedTimer timer;
+
+      statement.ExecuteWithoutResult();
+
+      measures.push_back(timer.GetElapsedMicroseconds());
+    }
+    
+    std::sort(measures.begin(), measures.end());
+
+    return measures[measures.size() / 2];
+  }
+
+
   DatabaseManager* IndexBackend::CreateSingleDatabaseManager(IDatabaseBackend& backend,
                                                              bool hasIdentifierTags,
                                                              const std::list<IdentifierTag>& identifierTags)
--- a/Framework/Plugins/IndexBackend.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/IndexBackend.h	Mon Jan 22 10:00:28 2024 +0100
@@ -46,9 +46,11 @@
     std::unique_ptr<IDatabaseBackendOutput::IFactory>  outputFactory_;
     
   protected:
-    void ClearDeletedFiles(DatabaseManager& manager);
+    virtual void ClearDeletedFiles(DatabaseManager& manager);
 
-    void ClearDeletedResources(DatabaseManager& manager);
+    virtual void ClearDeletedResources(DatabaseManager& manager);
+
+    virtual void ClearRemainingAncestor(DatabaseManager& manager);
 
     void SignalDeletedFiles(IDatabaseBackendOutput& output,
                             DatabaseManager& manager);
@@ -396,6 +398,27 @@
     virtual void ListAllLabels(std::list<std::string>& target,
                                DatabaseManager& manager) ORTHANC_OVERRIDE;
     
+    virtual bool HasAtomicIncrementGlobalProperty() ORTHANC_OVERRIDE;
+
+    virtual int64_t IncrementGlobalProperty(DatabaseManager& manager,
+                                            const char* serverIdentifier,
+                                            int32_t property,
+                                            int64_t increment) ORTHANC_OVERRIDE;
+
+    virtual bool HasUpdateAndGetStatistics() ORTHANC_OVERRIDE;
+
+    virtual void UpdateAndGetStatistics(DatabaseManager& manager,
+                                        int64_t& patientsCount,
+                                        int64_t& studiesCount,
+                                        int64_t& seriesCount,
+                                        int64_t& instancesCount,
+                                        int64_t& compressedSize,
+                                        int64_t& uncompressedSize) ORTHANC_OVERRIDE;
+
+    virtual bool HasMeasureLatency() ORTHANC_OVERRIDE;
+
+    virtual uint64_t MeasureLatency(DatabaseManager& manager) ORTHANC_OVERRIDE;
+
     /**
      * "maxDatabaseRetries" is to handle
      * "OrthancPluginErrorCode_DatabaseCannotSerialize" if there is a
--- a/Framework/Plugins/IndexUnitTests.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/Plugins/IndexUnitTests.h	Mon Jan 22 10:00:28 2024 +0100
@@ -57,7 +57,7 @@
   /**
    * Mock enumeration inspired from the source code of Orthanc... only
    * for use in the unit tests!
-   * https://hg.orthanc-server.com/orthanc/file/default/OrthancServer/Sources/ServerEnumerations.h
+   * https://orthanc.uclouvain.be/hg/orthanc/file/default/OrthancServer/Sources/ServerEnumerations.h
    **/
   enum MetadataType
   {
--- a/Framework/PostgreSQL/PostgreSQLDatabase.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLDatabase.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -159,7 +159,10 @@
 
   void PostgreSQLDatabase::ExecuteMultiLines(const std::string& sql)
   {
-    LOG(TRACE) << "PostgreSQL: " << sql;
+    if (IsVerboseEnabled())
+    {
+      LOG(INFO) << "PostgreSQL: " << sql;
+    }
     Open();
 
     PGresult* result = PQexec(reinterpret_cast<PGconn*>(pg_), sql.c_str());
@@ -318,13 +321,15 @@
 
   PostgreSQLDatabase::TransientAdvisoryLock::TransientAdvisoryLock(
     PostgreSQLDatabase&  database,
-    int32_t lock) :
+    int32_t lock,
+    unsigned int retries,
+    unsigned int retryInterval) :
     database_(database),
     lock_(lock)
   {
     bool locked = true;
     
-    for (unsigned int i = 0; i < 10; i++)
+    for (unsigned int i = 0; i < retries; i++)
     {
       if (database_.AcquireAdvisoryLock(lock_))
       {
@@ -333,7 +338,7 @@
       }
       else
       {
-        boost::this_thread::sleep(boost::posix_time::milliseconds(500));
+        boost::this_thread::sleep(boost::posix_time::milliseconds(retryInterval));
       }
     }
 
--- a/Framework/PostgreSQL/PostgreSQLDatabase.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLDatabase.h	Mon Jan 22 10:00:28 2024 +0100
@@ -36,6 +36,7 @@
   private:
     friend class PostgreSQLStatement;
     friend class PostgreSQLLargeObject;
+    friend class PostgreSQLTransaction;
 
     class Factory;
 
@@ -59,6 +60,11 @@
 
     void Open();
 
+    bool IsVerboseEnabled() const
+    {
+      return parameters_.IsVerboseEnabled();
+    }
+
     bool AcquireAdvisoryLock(int32_t lock);
 
     bool ReleaseAdvisoryLock(int32_t lock);
@@ -91,7 +97,9 @@
 
     public:
       TransientAdvisoryLock(PostgreSQLDatabase&  database,
-                            int32_t lock);
+                            int32_t lock,
+                            unsigned int retries = 10,
+                            unsigned int retryInterval = 500);
 
       ~TransientAdvisoryLock();
     };
@@ -99,5 +107,17 @@
     static IDatabaseFactory* CreateDatabaseFactory(const PostgreSQLParameters& parameters);
 
     static PostgreSQLDatabase* CreateDatabaseConnection(const PostgreSQLParameters& parameters);
+
+  protected:
+    const char* GetReadWriteTransactionStatement() const
+    {
+      return parameters_.GetReadWriteTransactionStatement();
+    }
+
+    const char* GetReadOnlyTransactionStatement() const
+    {
+      return parameters_.GetReadOnlyTransactionStatement();
+    }
+
   };
 }
--- a/Framework/PostgreSQL/PostgreSQLParameters.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLParameters.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -42,6 +42,7 @@
     lock_ = true;
     maxConnectionRetries_ = 10;
     connectionRetryInterval_ = 5;
+    isVerboseEnabled_ = false;
   }
 
 
@@ -94,8 +95,28 @@
 
     lock_ = configuration.GetBooleanValue("Lock", true);  // Use locking by default
 
+    isVerboseEnabled_ = configuration.GetBooleanValue("EnableVerboseLogs", false);
+
     maxConnectionRetries_ = configuration.GetUnsignedIntegerValue("MaximumConnectionRetries", 10);
     connectionRetryInterval_ = configuration.GetUnsignedIntegerValue("ConnectionRetryInterval", 5);
+
+    std::string transactionMode = configuration.GetStringValue("TransactionMode", "SERIALIZABLE");
+    if (transactionMode == "DEFAULT")
+    {
+      SetIsolationMode(IsolationMode_DbDefault);
+    }
+    else if (transactionMode == "READ COMMITTED")
+    {
+      SetIsolationMode(IsolationMode_ReadCommited);
+    }
+    else if (transactionMode == "SERIALIZABLE")
+    {
+      SetIsolationMode(IsolationMode_Serializable);
+    }
+    else
+    {
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_BadParameterType, std::string("Invalid value for 'TransactionMode': ") + transactionMode);
+    }
   }
 
 
--- a/Framework/PostgreSQL/PostgreSQLParameters.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLParameters.h	Mon Jan 22 10:00:28 2024 +0100
@@ -30,6 +30,13 @@
 
 namespace OrthancDatabases
 {
+  enum IsolationMode
+  {
+    IsolationMode_DbDefault = 0,
+    IsolationMode_Serializable = 1,
+    IsolationMode_ReadCommited = 2
+  };
+
   class PostgreSQLParameters
   {
   private:
@@ -43,7 +50,8 @@
     bool         lock_;
     unsigned int maxConnectionRetries_;
     unsigned int connectionRetryInterval_;
-
+    bool         isVerboseEnabled_;
+    IsolationMode isolationMode_;
     void Reset();
 
   public:
@@ -125,6 +133,52 @@
       return connectionRetryInterval_;
     }
 
+    void SetIsolationMode(IsolationMode isolationMode)
+    {
+      isolationMode_ = isolationMode;
+    }
+
+    const char* GetReadWriteTransactionStatement() const
+    {
+      switch (isolationMode_)
+      {
+        case IsolationMode_DbDefault:
+          return "";
+        case IsolationMode_ReadCommited:
+          return "SET TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE";
+        case IsolationMode_Serializable:
+          return "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE";
+        default:
+          throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
+      }
+    }
+
+    const char* GetReadOnlyTransactionStatement() const
+    {
+      switch (isolationMode_)
+      {
+        case IsolationMode_DbDefault:
+          return "";
+        case IsolationMode_ReadCommited:
+          return "SET TRANSACTION ISOLATION LEVEL READ COMMITTED READ ONLY";
+        case IsolationMode_Serializable:
+          return "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY";
+        default:
+          throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
+      }
+    }
+
+    void SetVerboseEnabled(bool enabled)
+    {
+      isVerboseEnabled_ = enabled;
+    }
+
+    bool IsVerboseEnabled() const
+    {
+      return isVerboseEnabled_;
+    }
+
+
     void Format(std::string& target) const;
   };
 }
--- a/Framework/PostgreSQL/PostgreSQLResult.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLResult.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -84,6 +84,11 @@
     position_(0), 
     database_(statement.GetDatabase())
   {
+    if (database_.IsVerboseEnabled())
+    {
+      LOG(INFO) << "PostgreSQL: " << statement.sql_;
+    }
+    
     result_ = statement.Execute();
     assert(result_ != NULL);   // An exception would have been thrown otherwise
 
@@ -255,6 +260,9 @@
       case OIDOID:
         return new LargeObjectResult(database_, GetLargeObjectOid(column));
 
+      case VOIDOID:
+        return NULL;
+
       default:
         throw Orthanc::OrthancException(Orthanc::ErrorCode_NotImplemented);
     }
--- a/Framework/PostgreSQL/PostgreSQLStatement.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLStatement.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -287,7 +287,8 @@
       }
       
 #if ORTHANC_PLUGINS_VERSION_IS_ABOVE(1, 9, 2)
-      throw Orthanc::OrthancException(Orthanc::ErrorCode_DatabaseCannotSerialize);
+      std::string errorString(PQresultErrorMessage(result));
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_DatabaseCannotSerialize, errorString, false); // don't log here, it is handled at higher level
 #else
       throw Orthanc::OrthancException(Orthanc::ErrorCode_Database, "Collision between multiple writers");
 #endif
@@ -308,7 +309,10 @@
     inputs_(new Inputs),
     formatter_(Dialect_PostgreSQL)
   {
-    LOG(TRACE) << "PostgreSQL: " << sql;
+    if (database.IsVerboseEnabled())
+    {
+      LOG(TRACE) << "PostgreSQL: " << sql;
+    }
   }
 
 
@@ -319,7 +323,11 @@
     formatter_(Dialect_PostgreSQL)
   {
     query.Format(sql_, formatter_);
-    LOG(TRACE) << "PostgreSQL: " << sql_;
+    
+    if (database.IsVerboseEnabled())
+    {
+      LOG(TRACE) << "PostgreSQL: " << sql_;
+    }
 
     for (size_t i = 0; i < formatter_.GetParametersCount(); i++)
     {
--- a/Framework/PostgreSQL/PostgreSQLTransaction.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Framework/PostgreSQL/PostgreSQLTransaction.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -70,12 +70,22 @@
     switch (type)
     {
       case TransactionType_ReadWrite:
-        database_.ExecuteMultiLines("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ WRITE");
-        break;
+      {
+        const std::string& statement = database_.GetReadWriteTransactionStatement();
+        if (!statement.empty()) // if not defined, will use the default DB transaction isolation level
+        {
+          database_.ExecuteMultiLines(statement);
+        }
+       }; break;
 
       case TransactionType_ReadOnly:
-        database_.ExecuteMultiLines("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY");
-        break;
+      {
+        const std::string& statement = database_.GetReadOnlyTransactionStatement();
+        if (!statement.empty()) // if not defined, will use the default DB transaction isolation level
+        {
+          database_.ExecuteMultiLines(statement);
+        }
+       }; break;
 
       default:
         throw Orthanc::OrthancException(Orthanc::ErrorCode_ParameterOutOfRange);
--- a/MySQL/CMakeLists.txt	Wed Sep 13 13:32:54 2023 +0200
+++ b/MySQL/CMakeLists.txt	Mon Jan 22 10:00:28 2024 +0100
@@ -131,6 +131,9 @@
 
 add_dependencies(FrameworkForPlugins AutogeneratedTarget)
 
+DefineSourceBasenameForTarget(OrthancMySQLIndex)
+DefineSourceBasenameForTarget(OrthancMySQLStorage)
+
 target_link_libraries(OrthancMySQLIndex FrameworkForPlugins)
 target_link_libraries(OrthancMySQLStorage FrameworkForPlugins)
 
--- a/MySQL/Plugins/MySQLIndex.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/MySQL/Plugins/MySQLIndex.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -237,7 +237,7 @@
         // to the LONGTEXT type (up to 4GB). This might be important
         // for applications such as the Osimis Web viewer that stores
         // large amount of metadata.
-        // http://book.orthanc-server.com/faq/features.html#central-registry-of-metadata-and-attachments
+        // https://orthanc.uclouvain.be/book/faq/features.html#central-registry-of-metadata-and-attachments
         t.GetDatabaseTransaction().ExecuteMultiLines("ALTER TABLE Metadata MODIFY value LONGTEXT");
         
         revision = 4;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/NOTES	Mon Jan 22 10:00:28 2024 +0100
@@ -0,0 +1,230 @@
+Resources:
+*********
+- PG transaction modes explained: https://www.postgresql.org/files/developer/concurrency.pdf
+- Isolation level explained (PG + MySQL): https://amirsoleimani.medium.com/understanding-database-isolation-level-via-examples-mysql-and-postgres-a86b5502d404
+- Message queuing in PG: https://www.crunchydata.com/blog/message-queuing-using-native-postgresql
+
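+For reference, depending on the new "TransactionMode" option, the plugin now opens its
+transactions with one of the statements defined in PostgreSQLParameters.h. A minimal
+sketch of a read-write transaction in READ COMMITTED mode (the DELETE is only a
+placeholder statement):
+
+BEGIN;
+SET TRANSACTION ISOLATION LEVEL READ COMMITTED READ WRITE;
+DELETE FROM DeletedFiles;
+COMMIT;
+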
+
+Create and delete instances Internals:
+*************************************
+
+isNewInstance = CreateInstance(...)
+
+if (!isNewInstance && overwriteInstances)
+  DeleteResource(instance)
+       -> ClearDeletedFiles(manager);
+            DELETE FROM DeletedFiles  ------> this is not a temporary table in postgres but it is supposed to be empty before and after each transaction -> it is empty when taking a snapshot of the DB in READ COMMITTED mode!!!
+          ClearDeletedResources(manager);
+            DELETE FROM DeletedResources  ------> this is not a temporary table in postgres but it is supposed to be empty before and after each transaction !!!
+
+            DELETE FROM RemainingAncestor  ------> this is not a temporary table in postgres but it is supposed to be empty before and after each transaction !!!
+            DELETE FROM Resources WHERE internalId=..
+               -> cascades delete the MainDicomTags, the Metadata and the AttachedFiles
+                  -> this triggers AttachedFileDeletedFunc
+                         INSERT INTO DeletedFiles VALUES
+                            (old.uuid, old.filetype, old.compressedSize,
+                            old.uncompressedSize, old.compressionType,
+                            old.uncompressedHash, old.compressedHash);
+                        RETURN NULL;
+               -> this triggers a SQL trigger: ResourceDeletedFunc
+                        INSERT INTO DeletedResources VALUES (old.resourceType, old.publicId);
+                        IF EXISTS (SELECT 1 FROM Resources WHERE parentId = old.parentId) THEN
+                            -- Signal that the deleted resource has a remaining parent 
+                            -- (a parent that must not be deleted but whose LastUpdate must be updated)
+                            INSERT INTO RemainingAncestor
+                            SELECT resourceType, publicId FROM Resources WHERE internalId = old.parentId;
+                        ELSE
+                            -- Delete a parent resource when its unique child is deleted 
+                            DELETE FROM Resources WHERE internalId = old.parentId;
+                        END IF;
+
+            SELECT * FROM RemainingAncestor
+              -> SignalRemainingAncestor()  // There is at most 1 remaining ancestor
+                 -> ServerIndex::TransactionContext::SignalRemainingAncestor()
+                    -> stores remainingType and remainingPublicId (this is used in StatelessDatabaseOperations::DeleteResource to build the REST response of /delete
+                                                                   and to update the LastUpdate of all parents (only when deleted through /delete))
+
+          SignalDeletedFiles(output, manager);
+            SELECT * FROM DeletedFiles
+              -> SignalDeletedAttachment()
+                 -> ServerIndex::TransactionContext::SignalAttachmentDeleted()
+                    -> pendingFilesToRemove_.push_back(FileToRemove(info))  (files are deleted in CommitFilesToRemove in the ServerIndex::TransactionContext::Commit)
+
+          SignalDeletedResources(output, manager);
+            SELECT resourceType, publicId FROM DeletedResources
+              -> SignalDeletedResource()
+                 -> Emit DeletedResource event (lua)
+
+
+  if (!CreateInstance(...))
+    Error: "No new instance while overwriting; this should not happen"
+
+if isNewInstance -> LogChange
+if isNewSeries -> LogChange
+....
+
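+A minimal sketch of how such trigger functions are attached to the tables (the trigger
+names below are only illustrative; the trigger functions are the ones quoted above):
+
+CREATE TRIGGER ResourceDeleted
+AFTER DELETE ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE ResourceDeletedFunc();
+
+CREATE TRIGGER AttachedFileDeleted
+AFTER DELETE ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileDeletedFunc();
+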
+Sample SQL code that you can execute in DBeaver to test new functions/procedures:
+
+CreateInstance
+************************************************************************
+
+CREATE OR REPLACE FUNCTION CreateInstance(
+  IN patient_public_id TEXT,
+  IN study_public_id TEXT,
+  IN series_public_id TEXT,
+  IN instance_public_id TEXT,
+  OUT is_new_patient BIGINT,
+  OUT is_new_study BIGINT,
+  OUT is_new_series BIGINT,
+  OUT is_new_instance BIGINT,
+  OUT patient_internal_id BIGINT,
+  OUT study_internal_id BIGINT,
+  OUT series_internal_id BIGINT,
+  OUT instance_internal_id BIGINT) AS $body$
+
+BEGIN
+	is_new_patient := 1;
+	is_new_study := 1;
+	is_new_series := 1;
+	is_new_instance := 1;
+
+	
+	BEGIN
+        INSERT INTO "resources" VALUES (DEFAULT, 0, patient_public_id, NULL);
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_patient := 0;
+    END;
+    SELECT internalid INTO patient_internal_id FROM "resources" WHERE publicId = patient_public_id AND resourcetype = 0 FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+
+	BEGIN
+        INSERT INTO "resources" VALUES (DEFAULT, 1, study_public_id, patient_internal_id);
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_study := 0;
+    END;
+    SELECT internalid INTO study_internal_id FROM "resources" WHERE publicId = study_public_id AND resourcetype = 1 FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+
+	BEGIN
+	    INSERT INTO "resources" VALUES (DEFAULT, 2, series_public_id, study_internal_id);
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_series := 0;
+    END;
+	SELECT internalid INTO series_internal_id FROM "resources" WHERE publicId = series_public_id AND resourcetype = 2 FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+
+  	BEGIN
+		INSERT INTO "resources" VALUES (DEFAULT, 3, instance_public_id, series_internal_id);
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_instance := 0;
+    END;
+    SELECT internalid INTO instance_internal_id FROM "resources" WHERE publicId = instance_public_id AND resourcetype = 3 FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+
+    IF is_new_instance > 0 THEN
+        -- Move the patient to the end of the recycling order.
+        PERFORM PatientAddedOrUpdated(patient_internal_id, 1);
+    END IF;  
+END;
+
+$body$ LANGUAGE plpgsql;
+
+
+DO $$ 
+DECLARE 
+    result record;
+begin
+	delete from "resources";
+
+    SELECT * INTO result from CreateInstance('patient1', 'study1', 'series1', 'instance1');
+
+    RAISE NOTICE 'Value patientInternalId: %, is_new: %', result.patient_internal_id, result.is_new_patient;
+    RAISE NOTICE 'Value studyInternalId: %, is_new: %', result.study_internal_id, result.is_new_study;
+    RAISE NOTICE 'Value seriesInternalId: %, is_new: %', result.series_internal_id, result.is_new_series;
+    RAISE NOTICE 'Value instanceInternalId: %, is_new: %', result.instance_internal_id, result.is_new_instance;
+    RAISE NOTICE '--------------';
+
+    SELECT * INTO result from CreateInstance('patient1', 'study1', 'series1', 'instance2');
+
+    RAISE NOTICE 'Value patientInternalId: %, is_new: %', result.patient_internal_id, result.is_new_patient;
+    RAISE NOTICE 'Value studyInternalId: %, is_new: %', result.study_internal_id, result.is_new_study;
+    RAISE NOTICE 'Value seriesInternalId: %, is_new: %', result.series_internal_id, result.is_new_series;
+    RAISE NOTICE 'Value instanceInternalId: %, is_new: %', result.instance_internal_id, result.is_new_instance;
+    RAISE NOTICE '--------------';
+
+    SELECT * INTO result from CreateInstance('patient1', 'study1', 'series1', 'instance2');
+
+    RAISE NOTICE 'Value patientInternalId: %, is_new: %', result.patient_internal_id, result.is_new_patient;
+    RAISE NOTICE 'Value studyInternalId: %, is_new: %', result.study_internal_id, result.is_new_study;
+    RAISE NOTICE 'Value seriesInternalId: %, is_new: %', result.series_internal_id, result.is_new_series;
+    RAISE NOTICE 'Value instanceInternalId: %, is_new: %', result.instance_internal_id, result.is_new_instance;
+    RAISE NOTICE '--------------';
+
+END $$;
+
+
+-- \set patient_key 'patient_key'
+-- SELECT CreateInstance('patient', 'study', 'series', 'instance', patient_key) ;
+
+-- drop function CreateInstance
+-- select * from "resources";
+-- delete from "resources";
+-- INSERT INTO "resources" VALUES (DEFAULT, 0, 'patient', NULL)
+
+
+
+************************************************************************
+
+In debug, no verbose logs, 10 connections
+Orthanc 1.12.1 + PG 5.1 (serializable mode)          : test_concurrent_anonymize_same_study with 4 workers and 10x repeat: 43.957 s
+Orthanc mainline + PG mainline (read-committed mode) : test_concurrent_anonymize_same_study with 4 workers and 10x repeat: 15.744 s
+                                                       test_concurrent_anonymize_same_study deletion took: 18.8 s
+
+Orthanc 1.12.1 + PG 5.1 (serializable mode)          : test_concurrent_uploads_same_study with 20 workers and 1x repeat: 21.214 s
+Orthanc mainline + PG mainline (read-committed mode) : test_concurrent_uploads_same_study with 20 workers and 1x repeat: 9.514 s
+
+Orthanc 1.12.1 + PG 5.1 (serializable mode)          : test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x): 23.016 s
+Orthanc mainline + PG mainline (read-committed mode) : test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x): 7.129 s 
+
+Orthanc mainline + PG mainline (read-committed mode) : test_upload_multiple_studies_from_multiple_threads with 10 workers and 25 files and repeat 3x: 6.454 s
+
+With Docker, with 10 SQL connections:
+osimis/orthanc:24.1.2 : test_concurrent_anonymize_same_study with 4 workers and 10x repeat: 18.956 s  FAIL !!!
+                        test_concurrent_anonymize_same_study deletion took: NA
+osimis/orthanc:current: test_concurrent_anonymize_same_study with 4 workers and 10x repeat:  6.867 s
+                        test_concurrent_anonymize_same_study deletion took: 9.095 s
+
+osimis/orthanc:24.1.2 : test_concurrent_uploads_same_study with 20 workers and 1x repeat:  9.822 s 
+osimis/orthanc:current: test_concurrent_uploads_same_study with 20 workers and 1x repeat: 16.027 s, up to 38 s!  (slower, but the test is not representative of a real-life scenario)
+
+osimis/orthanc:24.1.2 : test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x): 12.966 s
+osimis/orthanc:current: test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x):  4.196 s
+
+osimis/orthanc:24.1.2 : test_upload_multiple_studies_from_multiple_threads with 10 workers and 25 files and repeat 3x: 8.957 s
+osimis/orthanc:current: test_upload_multiple_studies_from_multiple_threads with 10 workers and 25 files and repeat 3x: 2.671 s
+
+Testing the number of connections (note: Orthanc and the PG server running on the same machine)
+10 connections : test_concurrent_anonymize_same_study with 4 workers and 10x repeat: 15.744 s
+1  connection  : test_concurrent_anonymize_same_study with 4 workers and 10x repeat: 21.341 s
+10 connections : test_concurrent_uploads_same_study with 20 workers and 1x repeat: 6.57 s
+1  connection  : test_concurrent_uploads_same_study with 20 workers and 1x repeat: 10.223 s
+10 connections : test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x): 7.129 s 
+1  connection  : test_upload_delete_same_study_from_multiple_threads with 5 workers and 3x repeat (10x): 11.172 s 
+
+
+TODO:
+- have a separate "thread" to run UpdateStatistics?
+
+- check https://discourse.orthanc-server.org/t/image-insert-are-too-slow-databse-performance-too-poor-when-using-mysql-mariadb/3820
+
+DONE:
+- implement a downgrade script?  And test it in the PostgresUpgrades integration tests
+- test the transfer plugin
+- perf tests: upload generated data (different studies)
+- In Docker images, re-enable MySQL & ODBC plugins + tests
+- reenable PatientRecyclingOrder
+- force the create/update DB transaction to be serializable (not needed: this is handled by POSTGRESQL_LOCK_DATABASE_SETUP)
+- PatientAddedFunc contains an IF  (check if other IF/THEN/ELSE pattern remains)
+- validate upgrade DB from previous Orthanc and from scratch
+- check the minimal version of PG (9.5 or 9.6? for CREATE INDEX IF NOT EXISTS): seems to work with 9.5, cf. the PostgresUpgrades integration tests
+- test events generation StableSeries .... (count the NewSeries and NewInstances events and make sure they match the expected number)
--- a/Odbc/CMakeLists.txt	Wed Sep 13 13:32:54 2023 +0200
+++ b/Odbc/CMakeLists.txt	Mon Jan 22 10:00:28 2024 +0100
@@ -150,6 +150,9 @@
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=1
   )
 
+DefineSourceBasenameForTarget(OrthancOdbcIndex)
+DefineSourceBasenameForTarget(OrthancOdbcStorage)
+
 install(
   TARGETS OrthancOdbcIndex OrthancOdbcStorage
   RUNTIME DESTINATION lib    # Destination for Windows
@@ -174,3 +177,5 @@
 set_target_properties(UnitTests PROPERTIES
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=0
   )
+
+DefineSourceBasenameForTarget(UnitTests)
\ No newline at end of file
--- a/PostgreSQL/CMakeLists.txt	Wed Sep 13 13:32:54 2023 +0200
+++ b/PostgreSQL/CMakeLists.txt	Mon Jan 22 10:00:28 2024 +0100
@@ -77,11 +77,9 @@
 
 
 EmbedResources(
-  POSTGRESQL_PREPARE_INDEX          ${CMAKE_SOURCE_DIR}/Plugins/PrepareIndex.sql
-  POSTGRESQL_CREATE_INSTANCE        ${CMAKE_SOURCE_DIR}/Plugins/CreateInstance.sql
-  POSTGRESQL_FAST_TOTAL_SIZE        ${CMAKE_SOURCE_DIR}/Plugins/FastTotalSize.sql
-  POSTGRESQL_FAST_COUNT_RESOURCES   ${CMAKE_SOURCE_DIR}/Plugins/FastCountResources.sql
-  POSTGRESQL_GET_LAST_CHANGE_INDEX  ${CMAKE_SOURCE_DIR}/Plugins/GetLastChangeIndex.sql
+  POSTGRESQL_PREPARE_INDEX           ${CMAKE_SOURCE_DIR}/Plugins/SQL/PrepareIndex.sql
+  POSTGRESQL_UPGRADE_UNKNOWN_TO_V6_1 ${CMAKE_SOURCE_DIR}/Plugins/SQL/Upgrades/UnknownToV6.1.sql
+  POSTGRESQL_UPGRADE_V6_1_TO_V6_2    ${CMAKE_SOURCE_DIR}/Plugins/SQL/Upgrades/V6.1ToV6.2.sql
   )
 
 
@@ -155,6 +153,10 @@
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=1
   )
 
+DefineSourceBasenameForTarget(OrthancPostgreSQLIndex)
+DefineSourceBasenameForTarget(OrthancPostgreSQLStorage)
+ 
+
 install(
   TARGETS OrthancPostgreSQLIndex OrthancPostgreSQLStorage
   RUNTIME DESTINATION lib    # Destination for Windows
@@ -178,3 +180,5 @@
 set_target_properties(UnitTests PROPERTIES
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=0
   )
+
+DefineSourceBasenameForTarget(UnitTests)
--- a/PostgreSQL/NEWS	Wed Sep 13 13:32:54 2023 +0200
+++ b/PostgreSQL/NEWS	Mon Jan 22 10:00:28 2024 +0100
@@ -1,3 +1,19 @@
+Pending changes in the mainline
+===============================
+
+* Experimental debug feature:
+  Introduced a new configuration option to select the transaction isolation level:
+    "TransactionMode": "SERIALIZABLE"
+  Allowed values: "SERIALIZABLE", "READ COMMITTED", "DEFAULT".
+  The "SERIALIZABLE" mode was the only available value up to now and remains the default.
+  The "READ COMMITTED" mode is now possible thanks to a rewrite of the SQL queries.
+  The "DEFAULT" value uses the default transaction isolation level defined at the database level.
+* internals:
+  - Added a UNIQUE constraint on Resources.publicId to detect DB inconsistencies
+* New "EnableVerboseLogs" configuration to show SQL statements being executed.
+
+
 Release 5.1 (2023-06-27)
 ========================
 
@@ -22,7 +38,7 @@
 * Support of multiple readers/writers, by handling retries from Orthanc SDK 1.9.2
 * Support of range reads for the storage area, from Orthanc SDK 1.9.0
 * Fix issue #193 (LSB binaries crash with PostgreSQL + SSL) by changeset
-  in OrthancFramework: https://hg.orthanc-server.com/orthanc/rev/9a9118406484
+  in OrthancFramework: https://orthanc.uclouvain.be/hg/orthanc/rev/9a9118406484
 * Fix issue #151 (Storage failures when running with two instances and PG_LOCK=false)
 
 
--- a/PostgreSQL/Plugins/CreateInstance.sql	Wed Sep 13 13:32:54 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,96 +0,0 @@
-CREATE FUNCTION CreateInstance(
-  IN patient TEXT,
-  IN study TEXT,
-  IN series TEXT,
-  IN instance TEXT,
-  OUT isNewPatient BIGINT,
-  OUT isNewStudy BIGINT,
-  OUT isNewSeries BIGINT,
-  OUT isNewInstance BIGINT,
-  OUT patientKey BIGINT,
-  OUT studyKey BIGINT,
-  OUT seriesKey BIGINT,
-  OUT instanceKey BIGINT) AS $body$
-
-DECLARE
-  patientSeq BIGINT;
-  countRecycling BIGINT;
-
-BEGIN
-  SELECT internalId FROM Resources INTO instanceKey WHERE publicId = instance AND resourceType = 3;
-
-  IF NOT (instanceKey IS NULL) THEN
-    -- This instance already exists, stop here
-    isNewInstance := 0;
-  ELSE
-    SELECT internalId FROM Resources INTO patientKey WHERE publicId = patient AND resourceType = 0;
-    SELECT internalId FROM Resources INTO studyKey WHERE publicId = study AND resourceType = 1;
-    SELECT internalId FROM Resources INTO seriesKey WHERE publicId = series AND resourceType = 2;
-
-    IF patientKey IS NULL THEN
-      -- Must create a new patient
-      IF NOT (studyKey IS NULL AND seriesKey IS NULL AND instanceKey IS NULL) THEN
-         RAISE EXCEPTION 'Broken invariant';
-      END IF;
-
-      INSERT INTO Resources VALUES (DEFAULT, 0, patient, NULL) RETURNING internalId INTO patientKey;
-      isNewPatient := 1;
-    ELSE
-      isNewPatient := 0;
-    END IF;
-  
-    IF (patientKey IS NULL) THEN
-       RAISE EXCEPTION 'Broken invariant';
-    END IF;
-
-    IF studyKey IS NULL THEN
-      -- Must create a new study
-      IF NOT (seriesKey IS NULL AND instanceKey IS NULL) THEN
-         RAISE EXCEPTION 'Broken invariant';
-      END IF;
-
-      INSERT INTO Resources VALUES (DEFAULT, 1, study, patientKey) RETURNING internalId INTO studyKey;
-      isNewStudy := 1;
-    ELSE
-      isNewStudy := 0;
-    END IF;
-
-    IF (studyKey IS NULL) THEN
-       RAISE EXCEPTION 'Broken invariant';
-    END IF;
-
-    IF seriesKey IS NULL THEN
-      -- Must create a new series
-      IF NOT (instanceKey IS NULL) THEN
-         RAISE EXCEPTION 'Broken invariant';
-      END IF;
-
-      INSERT INTO Resources VALUES (DEFAULT, 2, series, studyKey) RETURNING internalId INTO seriesKey;
-      isNewSeries := 1;
-    ELSE
-      isNewSeries := 0;
-    END IF;
-  
-    IF (seriesKey IS NULL OR NOT instanceKey IS NULL) THEN
-       RAISE EXCEPTION 'Broken invariant';
-    END IF;
-
-    INSERT INTO Resources VALUES (DEFAULT, 3, instance, seriesKey) RETURNING internalId INTO instanceKey;
-    isNewInstance := 1;
-
-    -- Move the patient to the end of the recycling order
-    SELECT seq FROM PatientRecyclingOrder WHERE patientId = patientKey INTO patientSeq;
-
-    IF NOT (patientSeq IS NULL) THEN
-       -- The patient is not protected
-       SELECT COUNT(*) FROM (SELECT * FROM PatientRecyclingOrder WHERE seq >= patientSeq LIMIT 2) AS tmp INTO countRecycling;
-       IF countRecycling = 2 THEN
-          -- The patient was not at the end of the recycling order
-          DELETE FROM PatientRecyclingOrder WHERE seq = patientSeq;
-          INSERT INTO PatientRecyclingOrder VALUES(DEFAULT, patientKey);
-       END IF;
-    END IF;
-  END IF;  
-END;
-
-$body$ LANGUAGE plpgsql;
--- a/PostgreSQL/Plugins/FastCountResources.sql	Wed Sep 13 13:32:54 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
--- https://wiki.postgresql.org/wiki/Count_estimate
-
-INSERT INTO GlobalIntegers
-SELECT 2, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 0;  -- Count patients
-
-INSERT INTO GlobalIntegers
-SELECT 3, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 1;  -- Count studies
-
-INSERT INTO GlobalIntegers
-SELECT 4, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 2;  -- Count series
-
-INSERT INTO GlobalIntegers
-SELECT 5, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 3;  -- Count instances
-
-
-CREATE OR REPLACE FUNCTION CountResourcesTrackerFunc()
-RETURNS TRIGGER AS $$
-BEGIN
-  IF TG_OP = 'INSERT' THEN
-    UPDATE GlobalIntegers SET value = value + 1 WHERE key = new.resourceType + 2;
-    RETURN new;
-  ELSIF TG_OP = 'DELETE' THEN
-    UPDATE GlobalIntegers SET value = value - 1 WHERE key = old.resourceType + 2;
-    RETURN old;
-  END IF;
-END;
-$$ LANGUAGE plpgsql;
-
-
-CREATE TRIGGER CountResourcesTracker
-AFTER INSERT OR DELETE ON Resources
-FOR EACH ROW
-EXECUTE PROCEDURE CountResourcesTrackerFunc();
--- a/PostgreSQL/Plugins/FastTotalSize.sql	Wed Sep 13 13:32:54 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-CREATE TABLE GlobalIntegers(
-       key INTEGER PRIMARY KEY,
-       value BIGINT);
-
-INSERT INTO GlobalIntegers
-SELECT 0, CAST(COALESCE(SUM(compressedSize), 0) AS BIGINT) FROM AttachedFiles;
-
-INSERT INTO GlobalIntegers
-SELECT 1, CAST(COALESCE(SUM(uncompressedSize), 0) AS BIGINT) FROM AttachedFiles;
-
-
-
-CREATE FUNCTION AttachedFileIncrementSizeFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  UPDATE GlobalIntegers SET value = value + new.compressedSize WHERE key = 0;
-  UPDATE GlobalIntegers SET value = value + new.uncompressedSize WHERE key = 1;
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-CREATE FUNCTION AttachedFileDecrementSizeFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  UPDATE GlobalIntegers SET value = value - old.compressedSize WHERE key = 0;
-  UPDATE GlobalIntegers SET value = value - old.uncompressedSize WHERE key = 1;
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-
-
-CREATE TRIGGER AttachedFileIncrementSize
-AFTER INSERT ON AttachedFiles
-FOR EACH ROW
-EXECUTE PROCEDURE AttachedFileIncrementSizeFunc();
-
-CREATE TRIGGER AttachedFileDecrementSize
-AFTER DELETE ON AttachedFiles
-FOR EACH ROW
-EXECUTE PROCEDURE AttachedFileDecrementSizeFunc();
--- a/PostgreSQL/Plugins/GetLastChangeIndex.sql	Wed Sep 13 13:32:54 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,27 +0,0 @@
--- In PostgreSQL, the most straightforward query would be to run:
-
---   SELECT currval(pg_get_serial_sequence('Changes', 'seq'))".
-
--- Unfortunately, this raises the error message "currval of sequence
--- "changes_seq_seq" is not yet defined in this session" if no change
--- has been inserted before the SELECT. We thus track the sequence
--- index with a trigger.
--- http://www.neilconway.org/docs/sequences/
-
-INSERT INTO GlobalIntegers
-SELECT 6, CAST(COALESCE(MAX(seq), 0) AS BIGINT) FROM Changes;
-
-
-CREATE FUNCTION InsertedChangeFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  UPDATE GlobalIntegers SET value = new.seq WHERE key = 6;
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-
-CREATE TRIGGER InsertedChange
-AFTER INSERT ON Changes
-FOR EACH ROW
-EXECUTE PROCEDURE InsertedChangeFunc();
--- a/PostgreSQL/Plugins/PostgreSQLDefinitions.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/PostgreSQL/Plugins/PostgreSQLDefinitions.h	Mon Jan 22 10:00:28 2024 +0100
@@ -48,3 +48,10 @@
  * https://groups.google.com/d/msg/orthanc-users/yV3LSTh_TjI/h3PRApJFBAAJ
  **/
 static const int32_t POSTGRESQL_LOCK_DATABASE_SETUP = 44;
+
+/**
+ * Transient advisory lock to protect the instance creation,
+ * because it is not 100% resilient to concurrency in, e.g., the READ COMMITTED
+ * transaction isolation level.
+ **/
+static const int32_t POSTGRESQL_LOCK_CREATE_INSTANCE = 45;
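
A minimal C++ sketch of how this new advisory lock could be used around instance creation,
based on the TransientAdvisoryLock constructor extended in PostgreSQLDatabase.h above (the
helper function itself is hypothetical):

  // Hypothetical helper: serializes concurrent CreateInstance calls across Orthanc servers
  static void CreateInstanceUnderLock(OrthancDatabases::PostgreSQLDatabase& db)
  {
    // Retries up to 10 times, sleeping 500 ms between attempts (the new default parameters)
    OrthancDatabases::PostgreSQLDatabase::TransientAdvisoryLock lock(
      db, POSTGRESQL_LOCK_CREATE_INSTANCE, 10, 500);

    // ... run the CreateInstance SQL while the advisory lock is held ...
  }  // the lock is released by the destructor of TransientAdvisoryLock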
--- a/PostgreSQL/Plugins/PostgreSQLIndex.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/PostgreSQL/Plugins/PostgreSQLIndex.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -30,6 +30,7 @@
 #include <EmbeddedResources.h>  // Auto-generated file
 
 #include <Compatibility.h>  // For std::unique_ptr<>
+#include <Toolbox.h>
 #include <Logging.h>
 #include <OrthancException.h>
 
@@ -60,6 +61,14 @@
     return PostgreSQLDatabase::CreateDatabaseFactory(parameters_);
   }
 
+  void PostgreSQLIndex::ApplyPrepareIndex(DatabaseManager::Transaction& t, DatabaseManager& manager)
+  {
+    std::string query;
+
+    Orthanc::EmbeddedResources::GetFileResource
+      (query, Orthanc::EmbeddedResources::POSTGRESQL_PREPARE_INDEX);
+    t.GetDatabaseTransaction().ExecuteMultiLines(query);
+  }
   
   void PostgreSQLIndex::ConfigureDatabase(DatabaseManager& manager,
                                           bool hasIdentifierTags,
@@ -76,7 +85,7 @@
     if (expectedVersion != 6)
     {
       LOG(ERROR) << "This database plugin is incompatible with your version of Orthanc "
-                 << "expecting the DB schema version " << expectedVersion 
+                 << "expecting the Orthanc DB schema version " << expectedVersion 
                  << ", but this plugin is only compatible with version 6";
       throw Orthanc::OrthancException(Orthanc::ErrorCode_Plugin);
     }
@@ -89,6 +98,7 @@
     }
 
     {
+      // Lock the full DB while checking whether the schema needs to be created/upgraded
       PostgreSQLDatabase::TransientAdvisoryLock lock(db, POSTGRESQL_LOCK_DATABASE_SETUP);
 
       if (clearAll_)
@@ -101,218 +111,98 @@
 
         if (!t.GetDatabaseTransaction().DoesTableExist("Resources"))
         {
-          std::string query;
-
-          Orthanc::EmbeddedResources::GetFileResource
-            (query, Orthanc::EmbeddedResources::POSTGRESQL_PREPARE_INDEX);
-          t.GetDatabaseTransaction().ExecuteMultiLines(query);
-
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabaseSchemaVersion, expectedVersion);
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabasePatchLevel, 1);
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_HasTrigramIndex, 0);
-        }
-          
-        if (!t.GetDatabaseTransaction().DoesTableExist("Resources"))
-        {
-          LOG(ERROR) << "Corrupted PostgreSQL database";
-          throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);        
-        }
+          LOG(WARNING) << "PostgreSQL is creating the database schema";
 
-        int version = 0;
-        if (!LookupGlobalIntegerProperty(version, manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabaseSchemaVersion) ||
-            version != 6)
-        {
-          LOG(ERROR) << "PostgreSQL plugin is incompatible with database schema version: " << version;
-          throw Orthanc::OrthancException(Orthanc::ErrorCode_Database);        
-        }
-
-        int revision;
-        if (!LookupGlobalIntegerProperty(revision, manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabasePatchLevel))
-        {
-          revision = 1;
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabasePatchLevel, revision);
-        }
-
-        if (revision != 1)
-        {
-          LOG(ERROR) << "PostgreSQL plugin is incompatible with database schema revision: " << revision;
-          throw Orthanc::OrthancException(Orthanc::ErrorCode_Database);        
-        }
+          ApplyPrepareIndex(t, manager);
 
-        t.Commit();
-      }
-
-      {
-        DatabaseManager::Transaction t(manager, TransactionType_ReadWrite);
-
-        int hasTrigram = 0;
-        if (!LookupGlobalIntegerProperty(hasTrigram, manager, MISSING_SERVER_IDENTIFIER,
-                                         Orthanc::GlobalProperty_HasTrigramIndex) ||
-            hasTrigram != 1)
-        {
-          /**
-           * Apply fix for performance issue (speed up wildcard search
-           * by using GIN trigrams). This implements the patch suggested
-           * in issue #47, BUT we also keep the original
-           * "DicomIdentifiersIndexValues", as it leads to better
-           * performance for "strict" searches (i.e. searches involving
-           * no wildcard).
-           * https://www.postgresql.org/docs/current/static/pgtrgm.html
-           * https://bugs.orthanc-server.com/show_bug.cgi?id=47
-           **/
-          try
+          if (!t.GetDatabaseTransaction().DoesTableExist("Resources"))
           {
-            // We've observed 9 minutes on DB with 100000 studies
-            LOG(WARNING) << "Trying to enable trigram matching on the PostgreSQL database "
-                         << "to speed up wildcard searches. This may take several minutes";
-
-            t.GetDatabaseTransaction().ExecuteMultiLines(
-              "CREATE EXTENSION IF NOT EXISTS pg_trgm; "
-              "CREATE INDEX DicomIdentifiersIndexValues2 ON DicomIdentifiers USING gin(value gin_trgm_ops);");
-
-            SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_HasTrigramIndex, 1);
-            LOG(WARNING) << "Trigram index has been created";
-
-            t.Commit();
-          }
-          catch (Orthanc::OrthancException&)
-          {
-            LOG(WARNING) << "Performance warning: Your PostgreSQL server does "
-                         << "not support trigram matching";
-            LOG(WARNING) << "-> Consider installing the \"pg_trgm\" extension on the "
-                         << "PostgreSQL server, e.g. on Debian: sudo apt install postgresql-contrib";
+            LOG(ERROR) << "Corrupted PostgreSQL database or failed to create the database schema";
+            throw Orthanc::OrthancException(Orthanc::ErrorCode_InternalError);        
           }
         }
         else
         {
-          t.Commit();
-        }
-      }
-
-      {
-        DatabaseManager::Transaction t(manager, TransactionType_ReadWrite);
-
-        int property = 0;
-        if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
-                                         Orthanc::GlobalProperty_HasCreateInstance) ||
-            property != 2)
-        {
-          LOG(INFO) << "Installing the CreateInstance extension";
+          LOG(WARNING) << "The database schema already exists, checking if it needs to be updated";
 
-          if (property == 1)
+          int version = 0;
+          if (!LookupGlobalIntegerProperty(version, manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabaseSchemaVersion) ||
+              version != 6)
           {
-            // Drop older, experimental versions of this extension
-            t.GetDatabaseTransaction().ExecuteMultiLines("DROP FUNCTION CreateInstance("
-                                                         "IN patient TEXT, IN study TEXT, IN series TEXT, in instance TEXT)");
+            LOG(ERROR) << "PostgreSQL plugin is incompatible with Orthanc database schema version: " << version;
+            throw Orthanc::OrthancException(Orthanc::ErrorCode_Database);        
           }
-        
-          std::string query;
-          Orthanc::EmbeddedResources::GetFileResource
-            (query, Orthanc::EmbeddedResources::POSTGRESQL_CREATE_INSTANCE);
-          t.GetDatabaseTransaction().ExecuteMultiLines(query);
 
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_HasCreateInstance, 2);
-        }
+          bool needToRunUpgradeFromUnknownToV1 = false;
+          bool needToRunUpgradeV1toV2 = false;
 
-      
-        if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
-                                         Orthanc::GlobalProperty_GetTotalSizeIsFast) ||
-            property != 1)
-        {
-          LOG(INFO) << "Installing the FastTotalSize extension";
-
-          std::string query;
-          Orthanc::EmbeddedResources::GetFileResource
-            (query, Orthanc::EmbeddedResources::POSTGRESQL_FAST_TOTAL_SIZE);
-          t.GetDatabaseTransaction().ExecuteMultiLines(query);
-
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_GetTotalSizeIsFast, 1);
-        }
-
+          int revision;
+          if (!LookupGlobalIntegerProperty(revision, manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_DatabasePatchLevel))
+          {
+            LOG(WARNING) << "No DatabasePatchLevel found, assuming it's 1";
+            revision = 1;
+            needToRunUpgradeFromUnknownToV1 = true;
+            needToRunUpgradeV1toV2 = true;
+          }
+          else if (revision == 1)
+          {
+            needToRunUpgradeV1toV2 = true;
+          }
 
-        // Installing this extension requires the "GlobalIntegers" table
-        // created by the "FastTotalSize" extension
-        property = 0;
-        if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
-                                         Orthanc::GlobalProperty_HasFastCountResources) ||
-            property != 1)
-        {
-          LOG(INFO) << "Installing the FastCountResources extension";
-
-          std::string query;
-          Orthanc::EmbeddedResources::GetFileResource
-            (query, Orthanc::EmbeddedResources::POSTGRESQL_FAST_COUNT_RESOURCES);
-          t.GetDatabaseTransaction().ExecuteMultiLines(query);
-
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_HasFastCountResources, 1);
-        }
-
+          int hasTrigram = 0;
+          if (!LookupGlobalIntegerProperty(hasTrigram, manager, MISSING_SERVER_IDENTIFIER,
+                                           Orthanc::GlobalProperty_HasTrigramIndex) || 
+              hasTrigram != 1)
+          {
+            // We've observed 9 minutes on DB with 100000 studies
+            LOG(WARNING) << "The DB schema update will try to enable trigram matching on the PostgreSQL database "
+                         << "to speed up wildcard searches. This may take several minutes";
+            needToRunUpgradeV1toV2 = true;
+          }
 
-        // Installing this extension requires the "GlobalIntegers" table
-        // created by the "GetLastChangeIndex" extension
-        property = 0;
-        if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
-                                         Orthanc::GlobalProperty_GetLastChangeIndex) ||
-            property != 1)
-        {
-          LOG(INFO) << "Installing the GetLastChangeIndex extension";
-
-          std::string query;
-          Orthanc::EmbeddedResources::GetFileResource
-            (query, Orthanc::EmbeddedResources::POSTGRESQL_GET_LAST_CHANGE_INDEX);
-          t.GetDatabaseTransaction().ExecuteMultiLines(query);
-
-          SetGlobalIntegerProperty(manager, MISSING_SERVER_IDENTIFIER, Orthanc::GlobalProperty_GetLastChangeIndex, 1);
-        }
-
-        t.Commit();
-      }
- 
-
-      {
-        // New in release 4.0 to deal with multiple writers
-        DatabaseManager::Transaction t(manager, TransactionType_ReadWrite);
-
-        if (!t.GetDatabaseTransaction().DoesTableExist("ServerProperties"))
-        {
-          t.GetDatabaseTransaction().ExecuteMultiLines("CREATE TABLE ServerProperties(server VARCHAR(64) NOT NULL, "
-                                                       "property INTEGER, value TEXT, PRIMARY KEY(server, property))");
-        }
+          int property = 0;
+          if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
+                                           Orthanc::GlobalProperty_HasFastCountResources) ||
+              property != 1)
+          {
+            needToRunUpgradeV1toV2 = true;
+          }
+          if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
+                                          Orthanc::GlobalProperty_GetTotalSizeIsFast) ||
+              property != 1)
+          {
+            needToRunUpgradeV1toV2 = true;
+          }
+          if (!LookupGlobalIntegerProperty(property, manager, MISSING_SERVER_IDENTIFIER,
+                                          Orthanc::GlobalProperty_GetLastChangeIndex) ||
+              property != 1)
+          {
+            needToRunUpgradeV1toV2 = true;
+          }
 
-        /**
-         * PostgreSQL 9.5: "Adding a column with a default requires
-         * updating each row of the table (to store the new column
-         * value). However, if no default is specified, PostgreSQL is
-         * able to avoid the physical update." => We set no default
-         * for performance (older entries will be NULL)
-         * https://www.postgresql.org/docs/9.5/ddl-alter.html
-         **/
-        if (!db.DoesColumnExist("Metadata", "revision"))
-        {
-          t.GetDatabaseTransaction().ExecuteMultiLines("ALTER TABLE Metadata ADD COLUMN revision INTEGER");
-        }
+          if (needToRunUpgradeFromUnknownToV1)
+          {
+            LOG(WARNING) << "Upgrading DB schema from unknown to revision 1";
+            std::string query;
 
-        if (!db.DoesColumnExist("AttachedFiles", "revision"))
-        {
-          t.GetDatabaseTransaction().ExecuteMultiLines("ALTER TABLE AttachedFiles ADD COLUMN revision INTEGER");
-        }
-
-        t.Commit();
-      }
- 
+            Orthanc::EmbeddedResources::GetFileResource
+              (query, Orthanc::EmbeddedResources::POSTGRESQL_UPGRADE_UNKNOWN_TO_V6_1);
+            t.GetDatabaseTransaction().ExecuteMultiLines(query);
+          }
+          
+          if (needToRunUpgradeV1toV2)
+          {
+            LOG(WARNING) << "Upgrading DB schema from revision 1 to revision 2";
 
-      {
-        // New in release 5.0 to deal with labels
-        DatabaseManager::Transaction t(manager, TransactionType_ReadWrite);
+            std::string query;
 
-        if (!t.GetDatabaseTransaction().DoesTableExist("Labels"))
-        {
-          t.GetDatabaseTransaction().ExecuteMultiLines(
-            "CREATE TABLE Labels("
-            "id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,"
-            "label TEXT, PRIMARY KEY(id, label));"
-            "CREATE INDEX LabelsIndex1 ON LABELS(id);"
-            "CREATE INDEX LabelsIndex2 ON LABELS(label);");
+            Orthanc::EmbeddedResources::GetFileResource
+              (query, Orthanc::EmbeddedResources::POSTGRESQL_UPGRADE_V6_1_TO_V6_2);
+            t.GetDatabaseTransaction().ExecuteMultiLines(query);
+
+            // apply all idempotent changes that are in the PrepareIndex script
+            ApplyPrepareIndex(t, manager);
+          }
         }
 
         t.Commit();
@@ -358,7 +248,8 @@
       result = static_cast<uint64_t>(statement.ReadInteger64(0));
     }
     
-    assert(result == IndexBackend::GetTotalCompressedSize(manager));
+    // disabled because this is not always true while transactions are executed at the READ COMMITTED isolation level.  It does however hold when no files are being deleted/added.
+    //assert(result == IndexBackend::GetTotalCompressedSize(manager));
     return result;
   }
 
@@ -379,10 +270,163 @@
       result = static_cast<uint64_t>(statement.ReadInteger64(0));
     }
     
-    assert(result == IndexBackend::GetTotalUncompressedSize(manager));
+    // disabled because this is not always true while transactions are executed at the READ COMMITTED isolation level.  It does however hold when no files are being deleted/added.
+    // assert(result == IndexBackend::GetTotalUncompressedSize(manager));
     return result;
   }
 
+  int64_t PostgreSQLIndex::IncrementGlobalProperty(DatabaseManager& manager,
+                                                   const char* serverIdentifier,
+                                                   int32_t property,
+                                                   int64_t increment)
+  {
+    if (serverIdentifier == NULL)
+    {
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_NullPointer);
+    }
+    else
+    {
+      if (strlen(serverIdentifier) == 0)
+      {
+        DatabaseManager::CachedStatement statement(
+          STATEMENT_FROM_HERE, manager,
+          "INSERT INTO GlobalProperties (property, value) VALUES(${property}, ${increment}) "
+          "  ON CONFLICT (property) DO UPDATE SET value = CAST(GlobalProperties.value AS BIGINT) + ${increment}"
+          " RETURNING CAST(value AS BIGINT)");
+
+        statement.SetParameterType("property", ValueType_Integer64);
+        statement.SetParameterType("increment", ValueType_Integer64);
+
+        Dictionary args;
+        args.SetIntegerValue("property", property);
+        args.SetIntegerValue("increment", increment);
+        
+        statement.Execute(args);
+
+        return statement.ReadInteger64(0);
+      }
+      else
+      {
+        DatabaseManager::CachedStatement statement(
+          STATEMENT_FROM_HERE, manager,
+          "INSERT INTO ServerProperties (server, property, value) VALUES(${server}, ${property}, ${increment}) "
+          "  ON CONFLICT (server, property) DO UPDATE SET value = CAST(ServerProperties.value AS BIGINT) + ${increment}"
+          " RETURNING CAST(value AS BIGINT)");
+
+        statement.SetParameterType("server", ValueType_Utf8String);
+        statement.SetParameterType("property", ValueType_Integer64);
+        statement.SetParameterType("increment", ValueType_Integer64);
+
+        Dictionary args;
+        args.SetUtf8Value("server", serverIdentifier);
+        args.SetIntegerValue("property", property);
+        args.SetIntegerValue("increment", increment);
+        
+        statement.Execute(args);
+
+        return statement.ReadInteger64(0);
+      }
+    }
+  }
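+
+  // Illustrative usage only (a sketch, not called from this file; the property id is
+  // hypothetical): atomically reserve the next value of a counter shared by several
+  // Orthanc instances, e.g.
+  //   int64_t next = index.IncrementGlobalProperty(manager, "", 1024, 1);
+  // The "INSERT ... ON CONFLICT ... RETURNING" upsert lets concurrent callers each
+  // obtain a distinct value without requiring an explicit lock in the caller.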
+
+  void PostgreSQLIndex::UpdateAndGetStatistics(DatabaseManager& manager,
+                                               int64_t& patientsCount,
+                                               int64_t& studiesCount,
+                                               int64_t& seriesCount,
+                                               int64_t& instancesCount,
+                                               int64_t& compressedSize,
+                                               int64_t& uncompressedSize)
+  {
+    DatabaseManager::CachedStatement statement(
+      STATEMENT_FROM_HERE, manager,
+      "SELECT * FROM UpdateStatistics()");
+
+    statement.Execute();
+
+    patientsCount = statement.ReadInteger64(0);
+    studiesCount = statement.ReadInteger64(1);
+    seriesCount = statement.ReadInteger64(2);
+    instancesCount = statement.ReadInteger64(3);
+    compressedSize = statement.ReadInteger64(4);
+    uncompressedSize = statement.ReadInteger64(5);
+  }
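+
+  // Note: the values above are read positionally, so the column order returned by the
+  // UpdateStatistics() SQL function must stay (patients, studies, series, instances,
+  // compressedSize, uncompressedSize).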
+
+  void PostgreSQLIndex::ClearDeletedFiles(DatabaseManager& manager)
+  {
+    { // note: the temporary table lifespan is the session, not the transaction -> that's why we need the IF NOT EXISTS
+      DatabaseManager::CachedStatement statement(
+        STATEMENT_FROM_HERE, manager,
+        "SELECT CreateDeletedFilesTemporaryTable()"
+        );
+      statement.ExecuteWithoutResult();
+    }
+
+  }
+
+  void PostgreSQLIndex::ClearDeletedResources(DatabaseManager& manager)
+  {
+    { // note: the temporary table lifespan is the session, not the transaction -> that's why we need the IF NOT EXISTS
+      DatabaseManager::CachedStatement statement(
+        STATEMENT_FROM_HERE, manager,
+        "CREATE TEMPORARY TABLE IF NOT EXISTS  DeletedResources("
+        "resourceType INTEGER NOT NULL,"
+        "publicId VARCHAR(64) NOT NULL"
+        ");"
+        );
+      statement.Execute();
+    }
+    {
+      DatabaseManager::CachedStatement statement(
+        STATEMENT_FROM_HERE, manager,
+        "DELETE FROM DeletedResources;"
+        );
+
+      statement.Execute();
+    }
+
+  }
+
+  void PostgreSQLIndex::ClearRemainingAncestor(DatabaseManager& manager)
+  {
+  }
+
+  void PostgreSQLIndex::DeleteResource(IDatabaseBackendOutput& output,
+                                       DatabaseManager& manager,
+                                       int64_t id)
+  {
+    // the clearing of the temporary tables is now implemented in the DeleteResource SQL function
+    DatabaseManager::CachedStatement statement(
+      STATEMENT_FROM_HERE, manager,
+      "SELECT * FROM DeleteResource(${id})");
+
+    statement.SetParameterType("id", ValueType_Integer64);
+
+    Dictionary args;
+    args.SetIntegerValue("id", id);
+
+    statement.Execute(args);
+
+    if (statement.IsDone() ||
+        statement.GetResultFieldsCount() != 2)
+    {
+      throw Orthanc::OrthancException(Orthanc::ErrorCode_Database);
+    }
+
+    statement.SetResultFieldType(0, ValueType_Integer64);
+    statement.SetResultFieldType(1, ValueType_Utf8String);
+
+    if (!statement.IsNull(0))
+    {
+      output.SignalRemainingAncestor(
+        statement.ReadString(1),
+        static_cast<OrthancPluginResourceType>(statement.ReadInteger32(0)));
+    }
+
+    SignalDeletedFiles(output, manager);
+    SignalDeletedResources(output, manager);
+  }
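+
+  // For reference: the single "SELECT * FROM DeleteResource(${id})" call replaces the
+  // previous sequence of statements; the SQL function locks the parent, deletes the
+  // resource (firing the triggers that fill the session-level DeletedFiles/DeletedResources
+  // temporary tables) and returns the remaining ancestor, if any.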
+
+
 
 #if ORTHANC_PLUGINS_HAS_DATABASE_CONSTRAINT == 1
   void PostgreSQLIndex::CreateInstance(OrthancPluginCreateInstanceResult& result,
@@ -406,7 +450,7 @@
     args.SetUtf8Value("study", hashStudy);
     args.SetUtf8Value("series", hashSeries);
     args.SetUtf8Value("instance", hashInstance);
-    
+
     statement.Execute(args);
 
     if (statement.IsDone() ||
@@ -420,6 +464,8 @@
       statement.SetResultFieldType(i, ValueType_Integer64);
     }
 
+    // LOG(INFO) << statement.ReadInteger64(0) << statement.ReadInteger64(1) << statement.ReadInteger64(2) << statement.ReadInteger64(3);
+
     result.isNewInstance = (statement.ReadInteger64(3) == 1);
     result.instanceId = statement.ReadInteger64(7);
 
@@ -436,6 +482,133 @@
 #endif
 
 
+#if ORTHANC_PLUGINS_HAS_DATABASE_CONSTRAINT == 1
+  static void ExecuteSetResourcesContentTags(
+    DatabaseManager& manager,
+    const std::string& table,
+    const std::string& variablePrefix,
+    uint32_t count,
+    const OrthancPluginResourcesContentTags* tags)
+  {
+    std::string sql;
+    Dictionary args;
+    
+    for (uint32_t i = 0; i < count; i++)
+    {
+      std::string name = variablePrefix + boost::lexical_cast<std::string>(i);
+
+      args.SetUtf8Value(name, tags[i].value);
+      
+      std::string insert = ("(" + boost::lexical_cast<std::string>(tags[i].resource) + ", " +
+                            boost::lexical_cast<std::string>(tags[i].group) + ", " +
+                            boost::lexical_cast<std::string>(tags[i].element) + ", " +
+                            "${" + name + "})");
+
+      if (sql.empty())
+      {
+        sql = "INSERT INTO " + table + " VALUES " + insert;
+      }
+      else
+      {
+        sql += ", " + insert;
+      }
+    }
+
+    if (!sql.empty())
+    {
+      DatabaseManager::StandaloneStatement statement(manager, sql);
+
+      for (uint32_t i = 0; i < count; i++)
+      {
+        statement.SetParameterType(variablePrefix + boost::lexical_cast<std::string>(i),
+                                   ValueType_Utf8String);
+      }
+
+      statement.Execute(args);
+    }
+  }
+#endif
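+
+  // For reference only (resource ids and tag group/element values are purely illustrative),
+  // with two tags the function above builds a statement such as:
+  //   INSERT INTO DicomIdentifiers VALUES (12, 16, 32, ${i0}), (12, 32, 14, ${i1})
+  // where ${i0}/${i1} are bound as UTF-8 string parameters.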
+  
+
+#if ORTHANC_PLUGINS_HAS_DATABASE_CONSTRAINT == 1
+  static void ExecuteSetResourcesContentMetadata(
+    DatabaseManager& manager,
+    bool hasRevisionsSupport,
+    uint32_t count,
+    const OrthancPluginResourcesContentMetadata* metadata)
+  {
+    if (count < 1)
+    {
+      return;
+    }
+
+    std::vector<std::string> resourceIds;
+    std::vector<std::string> metadataTypes;
+    std::vector<std::string> metadataValues;
+    std::vector<std::string> revisions;
+
+    Dictionary args;
+    
+    for (uint32_t i = 0; i < count; i++)
+    {
+      std::string argName = "m" + boost::lexical_cast<std::string>(i);
+
+      args.SetUtf8Value(argName, metadata[i].value);
+
+      resourceIds.push_back(boost::lexical_cast<std::string>(metadata[i].resource));
+      metadataTypes.push_back(boost::lexical_cast<std::string>(metadata[i].metadata));
+      metadataValues.push_back("${" + argName + "}");
+      revisions.push_back("0");
+    }
+
+    std::string joinedResourceIds;
+    std::string joinedMetadataTypes;
+    std::string joinedMetadataValues;
+    std::string joinedRevisions;
+
+    Orthanc::Toolbox::JoinStrings(joinedResourceIds, resourceIds, ",");
+    Orthanc::Toolbox::JoinStrings(joinedMetadataTypes, metadataTypes, ",");
+    Orthanc::Toolbox::JoinStrings(joinedMetadataValues, metadataValues, ",");
+    Orthanc::Toolbox::JoinStrings(joinedRevisions, revisions, ",");
+
+    std::string sql = std::string("SELECT InsertOrUpdateMetadata(ARRAY[") + 
+                                  joinedResourceIds + "], ARRAY[" + 
+                                  joinedMetadataTypes + "], ARRAY[" + 
+                                  joinedMetadataValues + "], ARRAY[" + 
+                                  joinedRevisions + "])";
+
+    DatabaseManager::StandaloneStatement statement(manager, sql);
+
+    for (uint32_t i = 0; i < count; i++)
+    {
+      statement.SetParameterType("m" + boost::lexical_cast<std::string>(i),
+                                  ValueType_Utf8String);
+    }
+
+    statement.Execute(args);
+  }
+#endif
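+
+  // For reference only (resource ids and metadata types are purely illustrative), with two
+  // metadata entries the function above builds a statement such as:
+  //   SELECT InsertOrUpdateMetadata(ARRAY[12,13], ARRAY[7,7], ARRAY[${m0},${m1}], ARRAY[0,0])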
+
+
+  void PostgreSQLIndex::SetResourcesContent(DatabaseManager& manager,
+                                     uint32_t countIdentifierTags,
+                                     const OrthancPluginResourcesContentTags* identifierTags,
+                                     uint32_t countMainDicomTags,
+                                     const OrthancPluginResourcesContentTags* mainDicomTags,
+                                     uint32_t countMetadata,
+                                     const OrthancPluginResourcesContentMetadata* metadata)
+  {
+    ExecuteSetResourcesContentTags(manager, "DicomIdentifiers", "i",
+                                   countIdentifierTags, identifierTags);
+
+    ExecuteSetResourcesContentTags(manager, "MainDicomTags", "t",
+                                   countMainDicomTags, mainDicomTags);
+    
+    ExecuteSetResourcesContentMetadata(manager, HasRevisionsSupport(), countMetadata, metadata);
+
+  }
+
+
   uint64_t PostgreSQLIndex::GetResourcesCount(DatabaseManager& manager,
                                               OrthancPluginResourceType resourceType)
   {
@@ -466,7 +639,9 @@
       result = static_cast<uint64_t>(statement.ReadInteger64(0));
     }
       
+    // note: this check is not always exact while transactions are executed at the READ COMMITTED isolation level.  It does however hold when no resources are being added/deleted.
     assert(result == IndexBackend::GetResourcesCount(manager, resourceType));
+
     return result;
   }
 
--- a/PostgreSQL/Plugins/PostgreSQLIndex.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/PostgreSQL/Plugins/PostgreSQLIndex.h	Mon Jan 22 10:00:28 2024 +0100
@@ -33,6 +33,15 @@
     PostgreSQLParameters   parameters_;
     bool                   clearAll_;
 
+  protected:
+    virtual void ClearDeletedFiles(DatabaseManager& manager);
+
+    virtual void ClearDeletedResources(DatabaseManager& manager);
+
+    virtual void ClearRemainingAncestor(DatabaseManager& manager);
+
+    void ApplyPrepareIndex(DatabaseManager::Transaction& t, DatabaseManager& manager);
+
   public:
     PostgreSQLIndex(OrthancPluginContext* context,
                     const PostgreSQLParameters& parameters);
@@ -55,8 +64,19 @@
     
     virtual int64_t CreateResource(DatabaseManager& manager,
                                    const char* publicId,
-                                   OrthancPluginResourceType type)
-      ORTHANC_OVERRIDE;
+                                   OrthancPluginResourceType type) ORTHANC_OVERRIDE;
+
+    virtual void DeleteResource(IDatabaseBackendOutput& output,
+                                DatabaseManager& manager,
+                                int64_t id) ORTHANC_OVERRIDE;
+
+    virtual void SetResourcesContent(DatabaseManager& manager,
+                                     uint32_t countIdentifierTags,
+                                     const OrthancPluginResourcesContentTags* identifierTags,
+                                     uint32_t countMainDicomTags,
+                                     const OrthancPluginResourcesContentTags* mainDicomTags,
+                                     uint32_t countMetadata,
+                                     const OrthancPluginResourcesContentMetadata* metadata) ORTHANC_OVERRIDE;
 
     virtual uint64_t GetTotalCompressedSize(DatabaseManager& manager) ORTHANC_OVERRIDE;
 
@@ -90,5 +110,29 @@
     {
       return true;
     }
+
+    virtual bool HasAtomicIncrementGlobalProperty() ORTHANC_OVERRIDE
+    {
+      return true;
+    }
+
+    virtual int64_t IncrementGlobalProperty(DatabaseManager& manager,
+                                            const char* serverIdentifier,
+                                            int32_t property,
+                                            int64_t increment) ORTHANC_OVERRIDE;
+
+    virtual bool HasUpdateAndGetStatistics() ORTHANC_OVERRIDE
+    {
+      return true;
+    }
+
+    virtual void UpdateAndGetStatistics(DatabaseManager& manager,
+                                        int64_t& patientsCount,
+                                        int64_t& studiesCount,
+                                        int64_t& seriesCount,
+                                        int64_t& instancesCount,
+                                        int64_t& compressedSize,
+                                        int64_t& uncompressedSize) ORTHANC_OVERRIDE;
+
   };
 }
--- a/PostgreSQL/Plugins/PrepareIndex.sql	Wed Sep 13 13:32:54 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,168 +0,0 @@
-CREATE TABLE GlobalProperties(
-       property INTEGER PRIMARY KEY,
-       value TEXT
-       );
-
-CREATE TABLE Resources(
-       internalId BIGSERIAL NOT NULL PRIMARY KEY,
-       resourceType INTEGER NOT NULL,
-       publicId VARCHAR(64) NOT NULL,
-       parentId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE
-       );
-
-CREATE TABLE MainDicomTags(
-       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
-       tagGroup INTEGER,
-       tagElement INTEGER,
-       value TEXT,
-       PRIMARY KEY(id, tagGroup, tagElement)
-       );
-
-CREATE TABLE DicomIdentifiers(
-       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
-       tagGroup INTEGER,
-       tagElement INTEGER,
-       value TEXT,
-       PRIMARY KEY(id, tagGroup, tagElement)
-       );
-
-CREATE TABLE Metadata(
-       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
-       type INTEGER NOT NULL,
-       value TEXT,
-       PRIMARY KEY(id, type)
-       );
-
-CREATE TABLE AttachedFiles(
-       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
-       fileType INTEGER,
-       uuid VARCHAR(64) NOT NULL,
-       compressedSize BIGINT,
-       uncompressedSize BIGINT,
-       compressionType INTEGER,
-       uncompressedHash VARCHAR(40),
-       compressedHash VARCHAR(40),
-       PRIMARY KEY(id, fileType)
-       );              
-
-CREATE TABLE Changes(
-       seq BIGSERIAL NOT NULL PRIMARY KEY,
-       changeType INTEGER,
-       internalId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
-       resourceType INTEGER,
-       date VARCHAR(64)
-       );
-
-CREATE TABLE ExportedResources(
-       seq BIGSERIAL NOT NULL PRIMARY KEY,
-       resourceType INTEGER,
-       publicId VARCHAR(64),
-       remoteModality TEXT,
-       patientId VARCHAR(64),
-       studyInstanceUid TEXT,
-       seriesInstanceUid TEXT,
-       sopInstanceUid TEXT,
-       date VARCHAR(64)
-       ); 
-
-CREATE TABLE PatientRecyclingOrder(
-       seq BIGSERIAL NOT NULL PRIMARY KEY,
-       patientId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE
-       );
-
-CREATE INDEX ChildrenIndex ON Resources(parentId);
-CREATE INDEX PublicIndex ON Resources(publicId);
-CREATE INDEX ResourceTypeIndex ON Resources(resourceType);
-CREATE INDEX PatientRecyclingIndex ON PatientRecyclingOrder(patientId);
-
-CREATE INDEX MainDicomTagsIndex ON MainDicomTags(id);
-CREATE INDEX DicomIdentifiersIndex1 ON DicomIdentifiers(id);
-CREATE INDEX DicomIdentifiersIndex2 ON DicomIdentifiers(tagGroup, tagElement);
-CREATE INDEX DicomIdentifiersIndexValues ON DicomIdentifiers(value);
-
-CREATE INDEX ChangesIndex ON Changes(internalId);
-
-
--- New tables wrt. Orthanc core
-CREATE TABLE DeletedFiles(
-       uuid VARCHAR(64) NOT NULL,      -- 0
-       fileType INTEGER,               -- 1
-       compressedSize BIGINT,          -- 2
-       uncompressedSize BIGINT,        -- 3
-       compressionType INTEGER,        -- 4
-       uncompressedHash VARCHAR(40),   -- 5
-       compressedHash VARCHAR(40)      -- 6
-       );
-
-CREATE TABLE RemainingAncestor(
-       resourceType INTEGER NOT NULL,
-       publicId VARCHAR(64) NOT NULL
-       );
-
-CREATE TABLE DeletedResources(
-       resourceType INTEGER NOT NULL,
-       publicId VARCHAR(64) NOT NULL
-       );
--- End of differences
-
-
-CREATE FUNCTION AttachedFileDeletedFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  INSERT INTO DeletedFiles VALUES
-    (old.uuid, old.filetype, old.compressedSize,
-     old.uncompressedSize, old.compressionType,
-     old.uncompressedHash, old.compressedHash);
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-CREATE TRIGGER AttachedFileDeleted
-AFTER DELETE ON AttachedFiles
-FOR EACH ROW
-EXECUTE PROCEDURE AttachedFileDeletedFunc();
-
-
--- The following trigger combines 2 triggers from SQLite:
--- ResourceDeleted + ResourceDeletedParentCleaning
-CREATE FUNCTION ResourceDeletedFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  --RAISE NOTICE 'Delete resource %', old.parentId;
-  INSERT INTO DeletedResources VALUES (old.resourceType, old.publicId);
-  
-  -- http://stackoverflow.com/a/11299968/881731
-  IF EXISTS (SELECT 1 FROM Resources WHERE parentId = old.parentId) THEN
-    -- Signal that the deleted resource has a remaining parent
-    INSERT INTO RemainingAncestor
-      SELECT resourceType, publicId FROM Resources WHERE internalId = old.parentId;
-  ELSE
-    -- Delete a parent resource when its unique child is deleted 
-    DELETE FROM Resources WHERE internalId = old.parentId;
-  END IF;
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-CREATE TRIGGER ResourceDeleted
-AFTER DELETE ON Resources
-FOR EACH ROW
-EXECUTE PROCEDURE ResourceDeletedFunc();
-
-
-
-CREATE FUNCTION PatientAddedFunc() 
-RETURNS TRIGGER AS $body$
-BEGIN
-  -- The "0" corresponds to "OrthancPluginResourceType_Patient"
-  IF new.resourceType = 0 THEN
-    INSERT INTO PatientRecyclingOrder VALUES (DEFAULT, new.internalId);
-  END IF;
-  RETURN NULL;
-END;
-$body$ LANGUAGE plpgsql;
-
-CREATE TRIGGER PatientAdded
-AFTER INSERT ON Resources
-FOR EACH ROW
-EXECUTE PROCEDURE PatientAddedFunc();
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/PostgreSQL/Plugins/SQL/Downgrades/V6.2ToV6.1.sql	Mon Jan 22 10:00:28 2024 +0100
@@ -0,0 +1,252 @@
+-- This file contains an SQL procedure to downgrade from schema 6.2 to 6.1 (version = 6, revision = 1).
+-- It reinstalls all triggers and tables that have been removed or replaced in 6.2
+
+-- note: the unique constraints that have been added in 6.2 would not create any issues
+--       if kept, but they are dropped below to restore the exact 6.1 schema.
+
+-- these constraints were introduced in 6.2
+ALTER TABLE Resources DROP CONSTRAINT UniquePublicId;
+ALTER TABLE PatientRecyclingOrder DROP CONSTRAINT UniquePatientId;
+
+-- the CreateInstance function has been replaced in 6.2, reinstall the 6.1 version
+DROP FUNCTION CreateInstance;
+CREATE FUNCTION CreateInstance(
+  IN patient TEXT,
+  IN study TEXT,
+  IN series TEXT,
+  IN instance TEXT,
+  OUT isNewPatient BIGINT,
+  OUT isNewStudy BIGINT,
+  OUT isNewSeries BIGINT,
+  OUT isNewInstance BIGINT,
+  OUT patientKey BIGINT,
+  OUT studyKey BIGINT,
+  OUT seriesKey BIGINT,
+  OUT instanceKey BIGINT) AS $body$
+
+DECLARE
+  patientSeq BIGINT;
+  countRecycling BIGINT;
+
+BEGIN
+  SELECT internalId FROM Resources INTO instanceKey WHERE publicId = instance AND resourceType = 3;
+
+  IF NOT (instanceKey IS NULL) THEN
+    -- This instance already exists, stop here
+    isNewInstance := 0;
+  ELSE
+    SELECT internalId FROM Resources INTO patientKey WHERE publicId = patient AND resourceType = 0;
+    SELECT internalId FROM Resources INTO studyKey WHERE publicId = study AND resourceType = 1;
+    SELECT internalId FROM Resources INTO seriesKey WHERE publicId = series AND resourceType = 2;
+
+    IF patientKey IS NULL THEN
+      -- Must create a new patient
+      IF NOT (studyKey IS NULL AND seriesKey IS NULL AND instanceKey IS NULL) THEN
+         RAISE EXCEPTION 'Broken invariant';
+      END IF;
+
+      INSERT INTO Resources VALUES (DEFAULT, 0, patient, NULL) RETURNING internalId INTO patientKey;
+      isNewPatient := 1;
+    ELSE
+      isNewPatient := 0;
+    END IF;
+  
+    IF (patientKey IS NULL) THEN
+       RAISE EXCEPTION 'Broken invariant';
+    END IF;
+
+    IF studyKey IS NULL THEN
+      -- Must create a new study
+      IF NOT (seriesKey IS NULL AND instanceKey IS NULL) THEN
+         RAISE EXCEPTION 'Broken invariant';
+      END IF;
+
+      INSERT INTO Resources VALUES (DEFAULT, 1, study, patientKey) RETURNING internalId INTO studyKey;
+      isNewStudy := 1;
+    ELSE
+      isNewStudy := 0;
+    END IF;
+
+    IF (studyKey IS NULL) THEN
+       RAISE EXCEPTION 'Broken invariant';
+    END IF;
+
+    IF seriesKey IS NULL THEN
+      -- Must create a new series
+      IF NOT (instanceKey IS NULL) THEN
+         RAISE EXCEPTION 'Broken invariant';
+      END IF;
+
+      INSERT INTO Resources VALUES (DEFAULT, 2, series, studyKey) RETURNING internalId INTO seriesKey;
+      isNewSeries := 1;
+    ELSE
+      isNewSeries := 0;
+    END IF;
+  
+    IF (seriesKey IS NULL OR NOT instanceKey IS NULL) THEN
+       RAISE EXCEPTION 'Broken invariant';
+    END IF;
+
+    INSERT INTO Resources VALUES (DEFAULT, 3, instance, seriesKey) RETURNING internalId INTO instanceKey;
+    isNewInstance := 1;
+
+    -- Move the patient to the end of the recycling order
+    SELECT seq FROM PatientRecyclingOrder WHERE patientId = patientKey INTO patientSeq;
+
+    IF NOT (patientSeq IS NULL) THEN
+       -- The patient is not protected
+       SELECT COUNT(*) FROM (SELECT * FROM PatientRecyclingOrder WHERE seq >= patientSeq LIMIT 2) AS tmp INTO countRecycling;
+       IF countRecycling = 2 THEN
+          -- The patient was not at the end of the recycling order
+          DELETE FROM PatientRecyclingOrder WHERE seq = patientSeq;
+          INSERT INTO PatientRecyclingOrder VALUES(DEFAULT, patientKey);
+       END IF;
+    END IF;
+  END IF;  
+END;
+
+$body$ LANGUAGE plpgsql;
+
+
+-- these tables have been deleted in 6.2:
+CREATE TABLE DeletedFiles(
+       uuid VARCHAR(64) NOT NULL,      -- 0
+       fileType INTEGER,               -- 1
+       compressedSize BIGINT,          -- 2
+       uncompressedSize BIGINT,        -- 3
+       compressionType INTEGER,        -- 4
+       uncompressedHash VARCHAR(40),   -- 5
+       compressedHash VARCHAR(40)      -- 6
+       );
+
+CREATE TABLE RemainingAncestor(
+       resourceType INTEGER NOT NULL,
+       publicId VARCHAR(64) NOT NULL
+       );
+
+CREATE TABLE DeletedResources(
+       resourceType INTEGER NOT NULL,
+       publicId VARCHAR(64) NOT NULL
+       );
+
+
+-- these triggers have been introduced in 6.2, remove them
+DROP TRIGGER IF EXISTS IncrementResourcesTracker on Resources;
+DROP TRIGGER IF EXISTS DecrementResourcesTracker on Resources;
+DROP FUNCTION IF EXISTS IncrementResourcesTrackerFunc;
+DROP FUNCTION IF EXISTS DecrementResourcesTrackerFunc;
+
+-- this trigger has been removed in 6.2, reinstall it
+CREATE OR REPLACE FUNCTION CountResourcesTrackerFunc()
+RETURNS TRIGGER AS $$
+BEGIN
+  IF TG_OP = 'INSERT' THEN
+    UPDATE GlobalIntegers SET value = value + 1 WHERE key = new.resourceType + 2;
+    RETURN new;
+  ELSIF TG_OP = 'DELETE' THEN
+    UPDATE GlobalIntegers SET value = value - 1 WHERE key = old.resourceType + 2;
+    RETURN old;
+  END IF;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE TRIGGER CountResourcesTracker
+AFTER INSERT OR DELETE ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE CountResourcesTrackerFunc();
+
+-- this function was introduced in 6.2, remove it:
+DROP FUNCTION IF EXISTS InsertOrUpdateMetadata;
+
+-- reinstall old triggers:
+CREATE OR REPLACE FUNCTION AttachedFileIncrementSizeFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  UPDATE GlobalIntegers SET value = value + new.compressedSize WHERE key = 0;
+  UPDATE GlobalIntegers SET value = value + new.uncompressedSize WHERE key = 1;
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION AttachedFileDecrementSizeFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  UPDATE GlobalIntegers SET value = value - old.compressedSize WHERE key = 0;
+  UPDATE GlobalIntegers SET value = value - old.uncompressedSize WHERE key = 1;
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+DROP TRIGGER AttachedFileIncrementSize ON AttachedFiles;
+CREATE TRIGGER AttachedFileIncrementSize
+AFTER INSERT ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileIncrementSizeFunc();
+
+DROP TRIGGER AttachedFileDecrementSize ON AttachedFiles;
+CREATE TRIGGER AttachedFileDecrementSize
+AFTER DELETE ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileDecrementSizeFunc();
+
+-- these functions have been introduced in 6.2:
+DROP FUNCTION IF EXISTS UpdateStatistics;
+DROP FUNCTION IF EXISTS UpdateSingleStatistic;
+
+-- this table has been introduced in 6.2:
+DROP TABLE IF EXISTS GlobalIntegersChanges;
+
+-- these functions have been introduced in 6.2:
+DROP FUNCTION IF EXISTS CreateDeletedFilesTemporaryTable;
+DROP FUNCTION IF EXISTS DeleteResource;
+
+-- reinstall this old trigger:
+CREATE OR REPLACE FUNCTION ResourceDeletedFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  --RAISE NOTICE 'Delete resource %', old.parentId;
+  INSERT INTO DeletedResources VALUES (old.resourceType, old.publicId);
+  
+  -- http://stackoverflow.com/a/11299968/881731
+  IF EXISTS (SELECT 1 FROM Resources WHERE parentId = old.parentId) THEN
+    -- Signal that the deleted resource has a remaining parent
+    INSERT INTO RemainingAncestor
+      SELECT resourceType, publicId FROM Resources WHERE internalId = old.parentId;
+  ELSE
+    -- Delete a parent resource when its unique child is deleted 
+    DELETE FROM Resources WHERE internalId = old.parentId;
+  END IF;
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS ResourceDeleted ON Resources;
+CREATE TRIGGER ResourceDeleted
+AFTER DELETE ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE ResourceDeletedFunc();
+
+-- reinstall this old trigger:
+CREATE OR REPLACE FUNCTION PatientAddedFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  -- The "0" corresponds to "OrthancPluginResourceType_Patient"
+  IF new.resourceType = 0 THEN
+    INSERT INTO PatientRecyclingOrder VALUES (DEFAULT, new.internalId);
+  END IF;
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS PatientAdded ON Resources;
+CREATE TRIGGER PatientAdded
+AFTER INSERT ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE PatientAddedFunc();
+
+
+-- set the global properties that actually document the DB version, revision and some of the capabilities
+-- modify only the ones that have changed
+DELETE FROM GlobalProperties WHERE property IN (4, 11);
+INSERT INTO GlobalProperties VALUES (4, 1); -- GlobalProperty_DatabasePatchLevel
+INSERT INTO GlobalProperties VALUES (11, 2); -- GlobalProperty_HasCreateInstance
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/PostgreSQL/Plugins/SQL/PrepareIndex.sql	Mon Jan 22 10:00:28 2024 +0100
@@ -0,0 +1,561 @@
+-- This SQL file creates a DB in version.revision 6.2 directly
+-- It is also run after the upgrade scripts to create new tables and/or create/replace triggers and functions.
+-- This script is self-contained: it contains everything that needs to be run to create an Orthanc DB.
+-- Note to developers: 
+--   - it is and must stay idempotent.
+--   - it is executed while the DB is "locked"; only one Orthanc instance can execute it at a given time.
+
+CREATE TABLE IF NOT EXISTS GlobalProperties(
+       property INTEGER PRIMARY KEY,
+       value TEXT
+       );
+
+CREATE TABLE IF NOT EXISTS Resources(
+       internalId BIGSERIAL NOT NULL PRIMARY KEY,
+       resourceType INTEGER NOT NULL,
+       publicId VARCHAR(64) NOT NULL,
+       parentId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       CONSTRAINT UniquePublicId UNIQUE (publicId)
+       );
+
+CREATE TABLE IF NOT EXISTS MainDicomTags(
+       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       tagGroup INTEGER,
+       tagElement INTEGER,
+       value TEXT,
+       PRIMARY KEY(id, tagGroup, tagElement)
+       );
+
+CREATE TABLE IF NOT EXISTS DicomIdentifiers(
+       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       tagGroup INTEGER,
+       tagElement INTEGER,
+       value TEXT,
+       PRIMARY KEY(id, tagGroup, tagElement)
+       );
+
+CREATE TABLE IF NOT EXISTS Metadata(
+       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       type INTEGER NOT NULL,
+       value TEXT,
+       revision INTEGER,
+       PRIMARY KEY(id, type)
+       );
+
+CREATE TABLE IF NOT EXISTS AttachedFiles(
+       id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       fileType INTEGER,
+       uuid VARCHAR(64) NOT NULL,
+       compressedSize BIGINT,
+       uncompressedSize BIGINT,
+       compressionType INTEGER,
+       uncompressedHash VARCHAR(40),
+       compressedHash VARCHAR(40),
+       revision INTEGER,
+       PRIMARY KEY(id, fileType)
+       );              
+
+CREATE TABLE IF NOT EXISTS Changes(
+       seq BIGSERIAL NOT NULL PRIMARY KEY,
+       changeType INTEGER,
+       internalId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       resourceType INTEGER,
+       date VARCHAR(64)
+       );
+
+CREATE TABLE IF NOT EXISTS ExportedResources(
+       seq BIGSERIAL NOT NULL PRIMARY KEY,
+       resourceType INTEGER,
+       publicId VARCHAR(64),
+       remoteModality TEXT,
+       patientId VARCHAR(64),
+       studyInstanceUid TEXT,
+       seriesInstanceUid TEXT,
+       sopInstanceUid TEXT,
+       date VARCHAR(64)
+       ); 
+
+CREATE TABLE IF NOT EXISTS PatientRecyclingOrder(
+       seq BIGSERIAL NOT NULL PRIMARY KEY,
+       patientId BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+       CONSTRAINT UniquePatientId UNIQUE (patientId)
+       );
+
+CREATE TABLE IF NOT EXISTS Labels(
+        id BIGINT REFERENCES Resources(internalId) ON DELETE CASCADE,
+        label TEXT, 
+        PRIMARY KEY(id, label)
+        );
+
+CREATE TABLE IF NOT EXISTS GlobalIntegers(
+       key INTEGER PRIMARY KEY,
+       value BIGINT);
+-- GlobalIntegers keys:
+-- 0: CompressedSize
+-- 1: UncompressedSize
+-- 2: PatientsCount
+-- 3: StudiesCount
+-- 4: SeriesCount
+-- 5: InstancesCount
+-- 6: ChangeSeq
+-- 7: PatientRecyclingOrderSeq
+
+CREATE TABLE IF NOT EXISTS ServerProperties(
+        server VARCHAR(64) NOT NULL,
+        property INTEGER, value TEXT, 
+        PRIMARY KEY(server, property)
+        );
+
+CREATE INDEX IF NOT EXISTS ChildrenIndex ON Resources(parentId);
+CREATE INDEX IF NOT EXISTS PublicIndex ON Resources(publicId);
+CREATE INDEX IF NOT EXISTS ResourceTypeIndex ON Resources(resourceType);
+CREATE INDEX IF NOT EXISTS PatientRecyclingIndex ON PatientRecyclingOrder(patientId);
+
+CREATE INDEX IF NOT EXISTS MainDicomTagsIndex ON MainDicomTags(id);
+CREATE INDEX IF NOT EXISTS DicomIdentifiersIndex1 ON DicomIdentifiers(id);
+CREATE INDEX IF NOT EXISTS DicomIdentifiersIndex2 ON DicomIdentifiers(tagGroup, tagElement);
+CREATE INDEX IF NOT EXISTS DicomIdentifiersIndexValues ON DicomIdentifiers(value);
+
+CREATE INDEX IF NOT EXISTS ChangesIndex ON Changes(internalId);
+CREATE INDEX IF NOT EXISTS LabelsIndex1 ON LABELS(id);
+CREATE INDEX IF NOT EXISTS LabelsIndex2 ON LABELS(label);
+
+------------------- Trigram index creation -------------------
+
+
+-- Apply fix for performance issue (speed up wildcard search by using GIN trigrams). This implements the patch suggested
+-- in issue #47, BUT we also keep the original "DicomIdentifiersIndexValues", as it leads to better
+-- performance for "strict" searches (i.e. searches involving no wildcard).
+-- https://www.postgresql.org/docs/current/static/pgtrgm.html
+-- https://orthanc.uclouvain.be/bugs/show_bug.cgi?id=47
+
+DO $body$
+BEGIN
+  IF EXISTS (SELECT 1 FROM pg_available_extensions WHERE name='pg_trgm') THEN
+    CREATE EXTENSION IF NOT EXISTS pg_trgm;
+    CREATE INDEX IF NOT EXISTS DicomIdentifiersIndexValues2 ON DicomIdentifiers USING gin(value gin_trgm_ops);
+  ELSE
+    RAISE NOTICE 'the pg_trgm extension is not available on your system';
+  END IF;
+END $body$;
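+
+-- A quick way to check the outcome (illustrative queries, not part of the schema):
+--   SELECT extname FROM pg_extension WHERE extname = 'pg_trgm';
+--   SELECT indexname FROM pg_indexes WHERE indexname = 'dicomidentifiersindexvalues2';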
+
+
+------------------- PatientAdded trigger & PatientRecyclingOrder -------------------
+DROP TRIGGER IF EXISTS PatientAdded ON Resources;
+
+CREATE OR REPLACE FUNCTION PatientAddedOrUpdated(
+    IN patient_id BIGINT,
+    IN is_update BIGINT
+    )
+RETURNS VOID AS $body$
+BEGIN
+    DECLARE
+        newSeq BIGINT;
+    BEGIN
+        UPDATE GlobalIntegers SET value = value + 1 WHERE key = 7 RETURNING value INTO newSeq;
+        IF is_update > 0 THEN
+            -- Note: protected patients are not listed in this table, so they won't be updated
+            UPDATE PatientRecyclingOrder SET seq = newSeq WHERE PatientRecyclingOrder.patientId = patient_id;
+        ELSE
+            INSERT INTO PatientRecyclingOrder VALUES (newSeq, patient_id);
+        END IF;
+    END;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION PatientAddedFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  -- The "0" corresponds to "OrthancPluginResourceType_Patient"
+  IF new.resourceType = 0 THEN
+    PERFORM PatientAddedOrUpdated(new.internalId, 0);
+  END IF;
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE TRIGGER PatientAdded
+AFTER INSERT ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE PatientAddedFunc();
+
+-- initial population of PatientRecyclingOrderSeq
+INSERT INTO GlobalIntegers
+    SELECT 7, CAST(COALESCE(MAX(seq), 0) AS BIGINT) FROM PatientRecyclingOrder
+    ON CONFLICT DO NOTHING;
+
+
+------------------- ResourceDeleted trigger -------------------
+DROP TRIGGER IF EXISTS ResourceDeleted ON Resources;
+
+-- The following trigger combines 2 triggers from SQLite:
+-- ResourceDeleted + ResourceDeletedParentCleaning
+CREATE OR REPLACE FUNCTION ResourceDeletedFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  -- RAISE NOTICE 'ResourceDeletedFunc %', old.publicId;
+  INSERT INTO DeletedResources VALUES (old.resourceType, old.publicId);
+  
+  -- If this resource was the last remaining child, delete the parent
+  DELETE FROM Resources WHERE internalId = old.parentId
+                              AND NOT EXISTS (SELECT 1 FROM Resources WHERE parentId = old.parentId);
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE TRIGGER ResourceDeleted
+AFTER DELETE ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE ResourceDeletedFunc();
+
+
+------------------- DeleteResource function -------------------
+
+CREATE OR REPLACE FUNCTION DeleteResource(
+    IN id BIGINT,
+    OUT remaining_ancestor_resource_type INTEGER,
+    OUT remaining_ancestor_public_id TEXT) AS $body$
+
+DECLARE
+    deleted_row RECORD;
+    locked_row RECORD;
+
+BEGIN
+
+    SET client_min_messages = warning;   -- suppress NOTICE:  relation "deletedresources" already exists, skipping
+    
+    -- note: temporary tables are created at session (connection) level -> they are likely to exist
+    -- these tables are used by the triggers
+    CREATE TEMPORARY TABLE IF NOT EXISTS DeletedResources(
+        resourceType INTEGER NOT NULL,
+        publicId VARCHAR(64) NOT NULL
+        );
+
+    RESET client_min_messages;
+
+    -- clear the temporary table in case it has been created earlier in the session
+    DELETE FROM DeletedResources;
+    
+    -- create/clear the DeletedFiles temporary table
+    PERFORM CreateDeletedFilesTemporaryTable();
+
+    -- Before deleting an object, we need to lock its parent until the end of the transaction to prevent
+    -- 2 threads from deleting the last 2 instances of a series at the same time -> neither of them would
+    -- realize that it is deleting the last instance, and the parent resources would not be deleted.
+    -- Locking only the immediate parent is sufficient to prevent this.
+    SELECT * INTO locked_row FROM resources WHERE internalid = (SELECT parentid FROM resources WHERE internalid = id) FOR UPDATE;
+
+    -- delete the resource itself
+    DELETE FROM Resources WHERE internalId=id RETURNING * INTO deleted_row;
+    -- note: the ResourceDeletedFunc trigger executes here and deletes the parents if there are no remaining children
+
+    -- If this resource still has siblings, keep track of the remaining parent
+    -- (a parent that must not be deleted but whose LastUpdate must be updated)
+    SELECT resourceType, publicId INTO remaining_ancestor_resource_type, remaining_ancestor_public_id
+        FROM Resources 
+        WHERE internalId = deleted_row.parentId
+            AND EXISTS (SELECT 1 FROM Resources WHERE parentId = deleted_row.parentId);
+
+END;
+
+$body$ LANGUAGE plpgsql;
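+
+-- Illustrative usage (the id is hypothetical, this is not part of the schema): delete a
+-- resource, then read the rows accumulated by the triggers in the session-level tables:
+--   SELECT * FROM DeleteResource(42);
+--   SELECT * FROM DeletedFiles;      -- attachments to remove from the storage area
+--   SELECT * FROM DeletedResources;  -- resources to signal back to the Orthanc core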
+
+CREATE OR REPLACE FUNCTION CreateDeletedFilesTemporaryTable(
+) RETURNS VOID AS $body$
+
+BEGIN
+
+    SET client_min_messages = warning;   -- suppress NOTICE:  relation "deletedfiles" already exists, skipping
+    
+    -- note: temporary tables are created at session (connection) level -> they are likely to exist
+    CREATE TEMPORARY TABLE IF NOT EXISTS DeletedFiles(
+        uuid VARCHAR(64) NOT NULL,
+        fileType INTEGER,
+        compressedSize BIGINT,
+        uncompressedSize BIGINT,
+        compressionType INTEGER,
+        uncompressedHash VARCHAR(40),
+        compressedHash VARCHAR(40)
+        );
+
+    RESET client_min_messages;
+
+    -- clear the temporary table in case it has been created earlier in the session
+    DELETE FROM DeletedFiles;
+END;
+
+$body$ LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION AttachedFileDeletedFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  INSERT INTO DeletedFiles VALUES
+    (old.uuid, old.filetype, old.compressedSize,
+     old.uncompressedSize, old.compressionType,
+     old.uncompressedHash, old.compressedHash);
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS AttachedFileDeleted on AttachedFiles;
+CREATE TRIGGER AttachedFileDeleted
+AFTER DELETE ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileDeletedFunc();
+
+
+------------------- Fast Statistics -------------------
+
+-- initial population of GlobalIntegers if not already there
+INSERT INTO GlobalIntegers
+    SELECT 0, CAST(COALESCE(SUM(compressedSize), 0) AS BIGINT) FROM AttachedFiles
+    ON CONFLICT DO NOTHING;
+
+INSERT INTO GlobalIntegers
+    SELECT 1, CAST(COALESCE(SUM(uncompressedSize), 0) AS BIGINT) FROM AttachedFiles
+    ON CONFLICT DO NOTHING;
+
+INSERT INTO GlobalIntegers
+    SELECT 2, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 0  -- Count patients
+    ON CONFLICT DO NOTHING;
+
+INSERT INTO GlobalIntegers
+    SELECT 3, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 1  -- Count studies
+    ON CONFLICT DO NOTHING;
+
+INSERT INTO GlobalIntegers
+    SELECT 4, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 2  -- Count series
+    ON CONFLICT DO NOTHING;
+
+INSERT INTO GlobalIntegers
+    SELECT 5, CAST(COALESCE(COUNT(*), 0) AS BIGINT) FROM Resources WHERE resourceType = 3  -- Count instances
+    ON CONFLICT DO NOTHING;
+
+
+-- This table stores all changes that need to be applied to the GlobalIntegers table.
+-- This way, each transaction can add rows to this table independently, without having to lock
+-- any row (which was the case with the previous FastTotalSize extension).
+-- These changes are applied at regular intervals by an external thread, or whenever someone
+-- requests the statistics.
+CREATE TABLE IF NOT EXISTS GlobalIntegersChanges(
+    key INTEGER,
+    value BIGINT);
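+
+-- Example of the delta mechanism (values are illustrative): inserting a 100-byte attachment
+-- adds the rows (0, 100) and (1, 100) to GlobalIntegersChanges; a later call to
+-- UpdateSingleStatistic(0) folds the key-0 deltas into GlobalIntegers and deletes them.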
+
+CREATE OR REPLACE FUNCTION UpdateSingleStatistic(
+    IN statistics_key INTEGER,
+    OUT new_value BIGINT
+) AS $body$
+BEGIN
+
+  -- Delete the current changes, sum them and update the GlobalIntegers row.
+  -- New rows can be added in the meantime, they won't be deleted or summed.
+  WITH deleted_rows AS (
+      DELETE FROM GlobalIntegersChanges
+      WHERE GlobalIntegersChanges.key = statistics_key
+      RETURNING value
+  )
+  UPDATE GlobalIntegers
+  SET value = value + (
+      SELECT COALESCE(SUM(value), 0)
+      FROM deleted_rows
+  )
+  WHERE GlobalIntegers.key = statistics_key
+  RETURNING value INTO new_value;
+
+END;
+$body$ LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION UpdateStatistics(
+  OUT patients_count BIGINT,
+  OUT studies_count BIGINT,
+  OUT series_count BIGINT,
+  OUT instances_count BIGINT,
+  OUT total_compressed_size BIGINT,
+  OUT total_uncompressed_size BIGINT
+) AS $body$
+BEGIN
+
+  SELECT UpdateSingleStatistic(0) INTO total_compressed_size;
+  SELECT UpdateSingleStatistic(1) INTO total_uncompressed_size;
+  SELECT UpdateSingleStatistic(2) INTO patients_count;
+  SELECT UpdateSingleStatistic(3) INTO studies_count;
+  SELECT UpdateSingleStatistic(4) INTO series_count;
+  SELECT UpdateSingleStatistic(5) INTO instances_count;
+
+END;
+$body$ LANGUAGE plpgsql;
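+
+-- Illustrative call (this is how PostgreSQLIndex::UpdateAndGetStatistics invokes it):
+--   SELECT * FROM UpdateStatistics();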
+
+
+CREATE OR REPLACE FUNCTION IncrementResourcesTrackerFunc()
+RETURNS TRIGGER AS $$
+BEGIN
+  INSERT INTO GlobalIntegersChanges VALUES(new.resourceType + 2, 1);
+  RETURN NULL;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION DecrementResourcesTrackerFunc()
+RETURNS TRIGGER AS $$
+BEGIN
+  INSERT INTO GlobalIntegersChanges VALUES(old.resourceType + 2, -1);
+  RETURN NULL;
+END;
+$$ LANGUAGE plpgsql;
+
+
+CREATE OR REPLACE FUNCTION AttachedFileIncrementSizeFunc()
+RETURNS TRIGGER AS $body$
+BEGIN
+  INSERT INTO GlobalIntegersChanges VALUES(0, new.compressedSize);
+  INSERT INTO GlobalIntegersChanges VALUES(1, new.uncompressedSize);
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE OR REPLACE FUNCTION AttachedFileDecrementSizeFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+  INSERT INTO GlobalIntegersChanges VALUES(0, -old.compressedSize);
+  INSERT INTO GlobalIntegersChanges VALUES(1, -old.uncompressedSize);
+  RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+DROP TRIGGER IF EXISTS AttachedFileIncrementSize on AttachedFiles;
+CREATE TRIGGER AttachedFileIncrementSize
+AFTER INSERT ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileIncrementSizeFunc();
+
+DROP TRIGGER IF EXISTS AttachedFileDecrementSize on AttachedFiles;
+CREATE TRIGGER AttachedFileDecrementSize
+AFTER DELETE ON AttachedFiles
+FOR EACH ROW
+EXECUTE PROCEDURE AttachedFileDecrementSizeFunc();
+
+DROP TRIGGER IF EXISTS IncrementResourcesTracker on Resources;
+CREATE TRIGGER IncrementResourcesTracker
+AFTER INSERT ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE IncrementResourcesTrackerFunc();
+
+DROP TRIGGER IF EXISTS DecrementResourcesTracker on Resources;
+CREATE TRIGGER DecrementResourcesTracker
+AFTER DELETE ON Resources
+FOR EACH ROW
+EXECUTE PROCEDURE DecrementResourcesTrackerFunc();
+
+
+------------------- InsertOrUpdateMetadata function -------------------
+CREATE OR REPLACE FUNCTION InsertOrUpdateMetadata(resource_ids BIGINT[],
+                                                  metadata_types INTEGER[], 
+                                                  metadata_values TEXT[],
+                                                  revisions INTEGER[])
+RETURNS VOID AS $body$
+BEGIN
+  	FOR i IN 1 .. ARRAY_LENGTH(resource_ids, 1) LOOP
+		-- RAISE NOTICE 'Parameter %: % % %', i, resource_ids[i], metadata_types[i], metadata_values[i];
+		INSERT INTO Metadata VALUES(resource_ids[i], metadata_types[i], metadata_values[i], revisions[i]) 
+          ON CONFLICT (id, type) DO UPDATE SET value = EXCLUDED.value, revision = EXCLUDED.revision;
+	END LOOP;
+  
+END;
+$body$ LANGUAGE plpgsql;
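+
+-- Illustrative call, mirroring the statement built by ExecuteSetResourcesContentMetadata in
+-- PostgreSQLIndex.cpp (the id, metadata type and value below are hypothetical):
+--   SELECT InsertOrUpdateMetadata(ARRAY[17], ARRAY[7], ARRAY['some value'], ARRAY[0]);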
+
+
+------------------- GetLastChange function -------------------
+DROP TRIGGER IF EXISTS InsertedChange ON Changes;
+
+-- insert the value if not already there
+INSERT INTO GlobalIntegers
+    SELECT 6, CAST(COALESCE(MAX(seq), 0) AS BIGINT) FROM Changes
+    ON CONFLICT DO NOTHING;
+
+CREATE OR REPLACE FUNCTION InsertedChangeFunc() 
+RETURNS TRIGGER AS $body$
+BEGIN
+    UPDATE GlobalIntegers SET value = new.seq WHERE key = 6;
+    RETURN NULL;
+END;
+$body$ LANGUAGE plpgsql;
+
+CREATE TRIGGER InsertedChange
+AFTER INSERT ON Changes
+FOR EACH ROW
+EXECUTE PROCEDURE InsertedChangeFunc();
+
+
+------------------- CreateInstance function -------------------
+CREATE OR REPLACE FUNCTION CreateInstance(
+  IN patient_public_id TEXT,
+  IN study_public_id TEXT,
+  IN series_public_id TEXT,
+  IN instance_public_id TEXT,
+  OUT is_new_patient BIGINT,
+  OUT is_new_study BIGINT,
+  OUT is_new_series BIGINT,
+  OUT is_new_instance BIGINT,
+  OUT patient_internal_id BIGINT,
+  OUT study_internal_id BIGINT,
+  OUT series_internal_id BIGINT,
+  OUT instance_internal_id BIGINT) AS $body$
+
+BEGIN
+	is_new_patient := 1;
+	is_new_study := 1;
+	is_new_series := 1;
+	is_new_instance := 1;
+
+	BEGIN
+        INSERT INTO "resources" VALUES (DEFAULT, 0, patient_public_id, NULL) RETURNING internalid INTO patient_internal_id;
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_patient := 0;
+            SELECT internalid INTO patient_internal_id FROM "resources" WHERE publicId = patient_public_id FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+    END;
+
+	BEGIN
+        INSERT INTO "resources" VALUES (DEFAULT, 1, study_public_id, patient_internal_id) RETURNING internalid INTO study_internal_id;
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_study := 0;
+            SELECT internalid INTO study_internal_id FROM "resources" WHERE publicId = study_public_id FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+    END;
+
+	BEGIN
+	    INSERT INTO "resources" VALUES (DEFAULT, 2, series_public_id, study_internal_id) RETURNING internalid INTO series_internal_id;
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_series := 0;
+            SELECT internalid INTO series_internal_id FROM "resources" WHERE publicId = series_public_id FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+    END;
+
+  	BEGIN
+		INSERT INTO "resources" VALUES (DEFAULT, 3, instance_public_id, series_internal_id) RETURNING internalid INTO instance_internal_id;
+    EXCEPTION
+        WHEN unique_violation THEN
+            is_new_instance := 0;
+            SELECT internalid INTO instance_internal_id FROM "resources" WHERE publicId = instance_public_id FOR UPDATE;  -- also locks the resource and its parent to prevent deletion while we complete this transaction
+    END;
+
+    IF is_new_instance > 0 THEN
+        -- Move the patient to the end of the recycling order.
+        PERFORM PatientAddedOrUpdated(patient_internal_id, 1);
+    END IF;  
+END;
+
+$body$ LANGUAGE plpgsql;
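A hypothetical call with made-up identifiers: the function returns the internal ids of the four levels of the DICOM hierarchy, together with flags telling which levels were newly created.

    SELECT * FROM CreateInstance('patient-uid', 'study-uid', 'series-uid', 'instance-uid');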
+
+
+
+-- set the global properties that actually document the DB version, revision and some of the capabilities
+DELETE FROM GlobalProperties WHERE property IN (1, 4, 6, 10, 11, 12, 13);
+INSERT INTO GlobalProperties VALUES (1, 6); -- GlobalProperty_DatabaseSchemaVersion
+INSERT INTO GlobalProperties VALUES (4, 2); -- GlobalProperty_DatabasePatchLevel
+INSERT INTO GlobalProperties VALUES (6, 1); -- GlobalProperty_GetTotalSizeIsFast
+INSERT INTO GlobalProperties VALUES (10, 1); -- GlobalProperty_HasTrigramIndex
+INSERT INTO GlobalProperties VALUES (11, 3); -- GlobalProperty_HasCreateInstance  -- this is actually the 3rd version of HasCreateInstance
+INSERT INTO GlobalProperties VALUES (12, 1); -- GlobalProperty_HasFastCountResources
+INSERT INTO GlobalProperties VALUES (13, 1); -- GlobalProperty_GetLastChangeIndex
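Once these rows are in place, the schema version and revision can be checked with a simple query; a sketch using the property codes documented above (properties 1 and 4 are expected to return 6 and 2):

    SELECT property, value FROM GlobalProperties WHERE property IN (1, 4);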
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/PostgreSQL/Plugins/SQL/Upgrades/UnknownToV6.1.sql	Mon Jan 22 10:00:28 2024 +0100
@@ -0,0 +1,18 @@
+-- add the revision columns if not yet done
+
+DO $body$
+BEGIN
+	IF NOT EXISTS (SELECT * FROM information_schema.columns WHERE table_schema='public' AND table_name='metadata' AND column_name='revision') THEN
+		ALTER TABLE Metadata ADD COLUMN revision INTEGER;
+	ELSE
+		raise notice 'the metadata.revision column already exists';
+	END IF;
+
+	IF NOT EXISTS (SELECT * FROM information_schema.columns WHERE table_schema='public' AND table_name='attachedfiles' AND column_name='revision') THEN
+		ALTER TABLE AttachedFiles ADD COLUMN revision INTEGER;
+	ELSE
+		raise notice 'the attachedfiles.revision column already exists';
+	END IF;
+
+END $body$;
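The same information_schema check used by the script can be run manually afterwards to verify the outcome of the upgrade; a sketch:

    SELECT table_name, column_name
    FROM information_schema.columns
    WHERE table_schema = 'public'
      AND column_name = 'revision'
      AND table_name IN ('metadata', 'attachedfiles');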
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/PostgreSQL/Plugins/SQL/Upgrades/V6.1ToV6.2.sql	Mon Jan 22 10:00:28 2024 +0100
@@ -0,0 +1,46 @@
+-- This file contains part of the changes required to upgrade from 6.1 to 6.2 (DB version 6 and revision 2)
+-- It actually contains only the changes that:
+   -- cannot be executed with an idempotent statement in SQL
+   -- or would pollute PrepareIndex.sql
+-- This file is executed only if the current schema is in revision 1, and it is executed
+-- before PrepareIndex.sql, which is idempotent.
+
+
+-- add unique constraints if they do not exist
+DO $body$
+BEGIN
+
+    IF NOT EXISTS (
+        SELECT 1
+        FROM information_schema.table_constraints
+        WHERE table_schema = 'public' 
+            AND table_name = 'resources'
+            AND constraint_name = 'uniquepublicid')
+    THEN
+        ALTER TABLE Resources ADD CONSTRAINT UniquePublicId UNIQUE (publicId);
+        RAISE NOTICE 'UniquePublicId constraint added to Resources.';
+    END IF;
+
+    IF NOT EXISTS (
+        SELECT 1
+        FROM information_schema.table_constraints
+        WHERE table_schema = 'public' 
+            AND table_name = 'patientrecyclingorder'
+            AND constraint_name = 'uniquepatientid')
+    THEN
+        ALTER TABLE PatientRecyclingOrder ADD CONSTRAINT UniquePatientId UNIQUE (patientId);
+        RAISE NOTICE 'UniquePatientId constraint added to PatientRecyclingOrder.';
+    END IF;
+
+END $body$ LANGUAGE plpgsql;
+
+
+-- In V6.2, we now use temporary tables, so we need to remove the old tables that might have been used in previous revisions!
+-- These statements, although idempotent, are not part of PrepareIndexV2.sql, in order to keep that file clean
+DROP TABLE IF EXISTS DeletedFiles;
+DROP TABLE IF EXISTS RemainingAncestor;
+DROP TABLE IF EXISTS DeletedResources;
+
+-- This trigger disappears and is not replaced in V6.2
+DROP TRIGGER IF EXISTS CountResourcesTracker ON Resources;
+
--- a/README	Wed Sep 13 13:32:54 2023 +0200
+++ b/README	Mon Jan 22 10:00:28 2024 +0100
@@ -24,7 +24,7 @@
 (.tar.gz) focused on one given RDBMS, you will only find the folders
 that are related to this specific RDBMS. The full source code is
 available at:
-https://hg.orthanc-server.com/orthanc-databases/
+https://orthanc.uclouvain.be/hg/orthanc-databases/
 
 
 Compilation and usage
@@ -33,9 +33,9 @@
 The compilation and usage of the plugins is available in the Orthanc
 Book:
 
-* MySQL/MariaDB : http://book.orthanc-server.com/plugins/mysql.html
-* PostgreSQL    : http://book.orthanc-server.com/plugins/postgresql.html
-* ODBC          : http://book.orthanc-server.com/plugins/odbc.html
+* MySQL/MariaDB : https://orthanc.uclouvain.be/book/plugins/mysql.html
+* PostgreSQL    : https://orthanc.uclouvain.be/book/plugins/postgresql.html
+* ODBC          : https://orthanc.uclouvain.be/book/plugins/odbc.html
 
 
 Older releases of PostgreSQL
@@ -46,7 +46,7 @@
 
 Releases <= 2.1 of the PostgreSQL plugins can still be found in the
 following legacy repository:
-https://hg.orthanc-server.com/orthanc-postgresql/
+https://orthanc.uclouvain.be/hg/orthanc-postgresql/
 
 
 Contributing
@@ -54,7 +54,7 @@
 
 Instructions for contributing to the Orthanc project are included in
 the Orthanc Book:
-https://book.orthanc-server.com/developers/repositories.html
+https://orthanc.uclouvain.be/book/developers/repositories.html
 
 
 Licensing
--- a/Resources/CMake/MariaDBConfiguration.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/CMake/MariaDBConfiguration.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -25,7 +25,7 @@
   set(MARIADB_PACKAGE_VERSION "3.1.11")
   set(MARIADB_CLIENT_SOURCES_DIR ${CMAKE_BINARY_DIR}/mariadb-connector-c-${MARIADB_PACKAGE_VERSION}-src)
   set(MARIADB_CLIENT_MD5 "cf9da5f0ac9ec72dd8309bdc1d1c6c2f")
-  set(MARIADB_CLIENT_URL "https://orthanc.uclouvain.be/third-party-downloads/mariadb-connector-c-${MARIADB_PACKAGE_VERSION}-src.tar.gz")
+  set(MARIADB_CLIENT_URL "https://orthanc.uclouvain.be/downloads/third-party-downloads/mariadb-connector-c-${MARIADB_PACKAGE_VERSION}-src.tar.gz")
 
   if (IS_DIRECTORY "${MARIADB_CLIENT_SOURCES_DIR}")
     set(FirstRun OFF)
--- a/Resources/CMake/PostgreSQLConfiguration.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/CMake/PostgreSQLConfiguration.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -54,7 +54,7 @@
   SET(LIBPQ_SOURCES_DIR ${CMAKE_BINARY_DIR}/postgresql-${LIBPQ_VERSION})
   DownloadPackage(
     "551302a823a1ab48b4ed14166beebba9"
-    "https://orthanc.uclouvain.be/third-party-downloads/postgresql-${LIBPQ_VERSION}.tar.gz"
+    "https://orthanc.uclouvain.be/downloads/third-party-downloads/postgresql-${LIBPQ_VERSION}.tar.gz"
     "${LIBPQ_SOURCES_DIR}")
 
   
--- a/Resources/CMake/UnixOdbcConfiguration.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/CMake/UnixOdbcConfiguration.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -25,7 +25,7 @@
   set(VERSION "2.3.9")  # Used in "config.h.in"
   set(UNIX_ODBC_SOURCES_DIR ${CMAKE_BINARY_DIR}/unixODBC-${VERSION})
   set(UNIX_ODBC_MD5 "06f76e034bb41df5233554abe961a16f")
-  set(UNIX_ODBC_URL "https://orthanc.uclouvain.be/third-party-downloads/unixODBC-${VERSION}.tar.gz")
+  set(UNIX_ODBC_URL "https://orthanc.uclouvain.be/downloads/third-party-downloads/unixODBC-${VERSION}.tar.gz")
 
   DownloadPackage(${UNIX_ODBC_MD5} ${UNIX_ODBC_URL} "${UNIX_ODBC_SOURCES_DIR}")
 
--- a/Resources/Orthanc/CMake/Compiler.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/CMake/Compiler.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -263,3 +263,24 @@
   # preceding batches. https://cmake.org/Bug/view.php?id=14874
   set(CMAKE_CXX_ARCHIVE_APPEND "<CMAKE_AR> <LINK_FLAGS> q <TARGET> <OBJECTS>")
 endif()
+
+
+# This function defines the macro "__ORTHANC_FILE__" as a replacement for
+# the macro "__FILE__", as the latter leaks the full path of the source
+# files in the binaries
+# https://stackoverflow.com/questions/8487986/file-macro-shows-full-path
+# https://twitter.com/wget42/status/1676877802375634944?s=20
+function(DefineSourceBasenameForTarget targetname)
+  # Microsoft Visual Studio is extremely slow when using
+  # "set_property()", so we only enable this feature for gcc and clang
+  if (CMAKE_COMPILER_IS_GNUCXX OR
+      CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    get_target_property(source_files "${targetname}" SOURCES)
+    foreach(sourcefile ${source_files})
+      get_filename_component(basename "${sourcefile}" NAME)
+      set_property(
+        SOURCE "${sourcefile}" APPEND
+        PROPERTY COMPILE_DEFINITIONS "__ORTHANC_FILE__=\"${basename}\"")
+    endforeach()
+  endif()
+endfunction()
--- a/Resources/Orthanc/CMake/DownloadOrthancFramework.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/CMake/DownloadOrthancFramework.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -153,9 +153,11 @@
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "1.11.2")
         set(ORTHANC_FRAMEWORK_MD5 "ede3de356493a8868545f8cb4b8bc8b5")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "1.11.3")
-        set(ORTHANC_FRAMEWORK_MD5 "5c1b11009d782f248739919db6bf7f7a")
+        set(ORTHANC_FRAMEWORK_MD5 "f941c0f5771db7616e7b7961026a60e2")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "1.12.0")
         set(ORTHANC_FRAMEWORK_MD5 "d32a0cde03b6eb603d8dd2b33d38bf1b")
+      elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "1.12.1")
+        set(ORTHANC_FRAMEWORK_MD5 "8a435140efc8ff4a01d8242f092f21de")
 
       # Below this point are development snapshots that were used to
       # release some plugin, before an official release of the Orthanc
@@ -166,19 +168,28 @@
       #
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "ae0e3fd609df")
         # DICOMweb 1.1 (framework pre-1.6.0)
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
         set(ORTHANC_FRAMEWORK_MD5 "7e09e9b530a2f527854f0b782d7e0645")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "82652c5fc04f")
         # Stone Web viewer 1.0 (framework pre-1.8.1)
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
         set(ORTHANC_FRAMEWORK_MD5 "d77331d68917e66a3f4f9b807bbdab7f")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "4a3ba4bf4ba7")
         # PostgreSQL 3.3 (framework pre-1.8.2)
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
         set(ORTHANC_FRAMEWORK_MD5 "2d82bddf06f9cfe82095495cb3b8abde")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "23ad1b9c7800")
         # For "Toolbox::ReadJson()" and "Toolbox::Write{...}Json()" (pre-1.9.0)
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
         set(ORTHANC_FRAMEWORK_MD5 "9af92080e57c60dd288eba46ce606c00")
       elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "b2e08d83e21d")
         # WSI 1.1 (framework pre-1.10.0), to remove "-std=c++11"
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
         set(ORTHANC_FRAMEWORK_MD5 "2eaa073cbb4b44ffba199ad93393b2b1")
+      elseif (ORTHANC_FRAMEWORK_VERSION STREQUAL "daf4807631c5")
+        # DICOMweb 1.15 (framework pre-1.12.2)
+        set(ORTHANC_FRAMEWORK_PRE_RELEASE ON)
+        set(ORTHANC_FRAMEWORK_MD5 "c644aff2817306b3207c98c92e43f35f")
       endif()
     endif()
   endif()
@@ -266,7 +277,7 @@
   else()
     message("Forking the Orthanc source repository using Mercurial")
     execute_process(
-      COMMAND ${ORTHANC_FRAMEWORK_HG} clone "https://hg.orthanc-server.com/orthanc/"
+      COMMAND ${ORTHANC_FRAMEWORK_HG} clone "https://orthanc.uclouvain.be/hg/orthanc/"
       WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
       RESULT_VARIABLE Failure
       )    
@@ -315,7 +326,11 @@
   else()
     # Default case: Download from the official Web site
     set(ORTHANC_FRAMEMORK_FILENAME Orthanc-${ORTHANC_FRAMEWORK_VERSION}.tar.gz)
-    set(ORTHANC_FRAMEWORK_URL "https://orthanc.uclouvain.be/third-party-downloads/orthanc-framework/${ORTHANC_FRAMEMORK_FILENAME}")
+    if (ORTHANC_FRAMEWORK_PRE_RELEASE)
+      set(ORTHANC_FRAMEWORK_URL "https://orthanc.uclouvain.be/downloads/third-party-downloads/orthanc-framework/${ORTHANC_FRAMEMORK_FILENAME}")
+    else()
+      set(ORTHANC_FRAMEWORK_URL "https://orthanc.uclouvain.be/downloads/sources/orthanc/${ORTHANC_FRAMEMORK_FILENAME}")
+    endif()
   endif()
 
   set(ORTHANC_FRAMEWORK_ARCHIVE "${CMAKE_SOURCE_DIR}/ThirdPartyDownloads/${ORTHANC_FRAMEMORK_FILENAME}")
--- a/Resources/Orthanc/CMake/DownloadPackage.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/CMake/DownloadPackage.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -101,19 +101,26 @@
       message(FATAL_ERROR "CMake is not allowed to download from Internet. Please set the ALLOW_DOWNLOADS option to ON")
     endif()
 
-    if ("${MD5}" STREQUAL "no-check")
-      message(WARNING "Not checking the MD5 of: ${Url}")
-      file(DOWNLOAD "${Url}" "${TMP_PATH}"
-        SHOW_PROGRESS TIMEOUT 300 INACTIVITY_TIMEOUT 60
-        STATUS Failure)
-    else()
-      file(DOWNLOAD "${Url}" "${TMP_PATH}"
-        SHOW_PROGRESS TIMEOUT 300 INACTIVITY_TIMEOUT 60
-        EXPECTED_MD5 "${MD5}" STATUS Failure)
-    endif()
+    foreach (retry RANGE 1 5)   # Retries 5 times
+      if ("${MD5}" STREQUAL "no-check")
+        message(WARNING "Not checking the MD5 of: ${Url}")
+        file(DOWNLOAD "${Url}" "${TMP_PATH}"
+          SHOW_PROGRESS TIMEOUT 30 INACTIVITY_TIMEOUT 10
+          STATUS Failure)
+      else()
+        file(DOWNLOAD "${Url}" "${TMP_PATH}"
+          SHOW_PROGRESS TIMEOUT 30 INACTIVITY_TIMEOUT 10
+          EXPECTED_MD5 "${MD5}" STATUS Failure)
+      endif()
 
-    list(GET Failure 0 Status)
+      list(GET Failure 0 Status)
+      if (Status EQUAL 0)
+        break()  # Successful download
+      endif()
+    endforeach()
+
     if (NOT Status EQUAL 0)
+      file(REMOVE ${TMP_PATH})
       message(FATAL_ERROR "Cannot download file: ${Url}")
     endif()
     
--- a/Resources/Orthanc/CMake/GoogleTestConfiguration.cmake	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/CMake/GoogleTestConfiguration.cmake	Mon Jan 22 10:00:28 2024 +0100
@@ -50,7 +50,7 @@
 
 elseif (STATIC_BUILD OR NOT USE_SYSTEM_GOOGLE_TEST)
   set(GOOGLE_TEST_SOURCES_DIR ${CMAKE_BINARY_DIR}/googletest-release-1.8.1)
-  set(GOOGLE_TEST_URL "https://orthanc.uclouvain.be/third-party-downloads/gtest-1.8.1.tar.gz")
+  set(GOOGLE_TEST_URL "https://orthanc.uclouvain.be/downloads/third-party-downloads/gtest-1.8.1.tar.gz")
   set(GOOGLE_TEST_MD5 "2e6fbeb6a91310a16efe181886c59596")
 
   DownloadPackage(${GOOGLE_TEST_MD5} ${GOOGLE_TEST_URL} "${GOOGLE_TEST_SOURCES_DIR}")
--- a/Resources/Orthanc/Databases/ISqlLookupFormatter.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/Databases/ISqlLookupFormatter.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -32,15 +32,16 @@
 
 #if ORTHANC_BUILDING_SERVER_LIBRARY == 1
 #  include "../../../OrthancFramework/Sources/OrthancException.h"
+#  include "../../../OrthancFramework/Sources/Toolbox.h"
 #else
 #  include <OrthancException.h>
+#  include <Toolbox.h>
 #endif
 
 #include "DatabaseConstraint.h"
 
 #include <boost/lexical_cast.hpp>
 #include <list>
-#include <Toolbox.h>
 
 
 namespace Orthanc
@@ -303,14 +304,11 @@
     }
   }
 
-  static bool FormatComparisonExperimental(std::string& target,
-                                           ISqlLookupFormatter& formatter,
-                                           const DatabaseConstraint& constraint,
-                                           //size_t index,
-                                           bool escapeBrackets)
+  static bool FormatComparison2(std::string& target,
+                                ISqlLookupFormatter& formatter,
+                                const DatabaseConstraint& constraint,
+                                bool escapeBrackets)
   {
-    //std::string tag = "t" + boost::lexical_cast<std::string>(index);
-
     std::string comparison;
     std::string tagFilter = ("tagGroup = " + boost::lexical_cast<std::string>(constraint.GetTag().GetGroup())
                               + " AND tagElement = " + boost::lexical_cast<std::string>(constraint.GetTag().GetElement()));
@@ -635,7 +633,7 @@
     {
       std::string comparison;
       
-      if (FormatComparisonExperimental(comparison, formatter, lookup[i], escapeBrackets))
+      if (FormatComparison2(comparison, formatter, lookup[i], escapeBrackets))
       {
         if (!comparison.empty())
         {
--- a/Resources/Orthanc/Databases/ISqlLookupFormatter.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/Databases/ISqlLookupFormatter.h	Mon Jan 22 10:00:28 2024 +0100
@@ -59,7 +59,7 @@
     /**
      * Whether to escape '[' and ']', which is only needed for
      * MSSQL. New in Orthanc 1.10.0, from the following changeset:
-     * https://hg.orthanc-server.com/orthanc-databases/rev/389c037387ea
+     * https://orthanc.uclouvain.be/hg/orthanc-databases/rev/389c037387ea
      **/
     virtual bool IsEscapeBrackets() const = 0;
 
--- a/Resources/Orthanc/Plugins/OrthancPluginCppWrapper.cpp	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/Plugins/OrthancPluginCppWrapper.cpp	Mon Jan 22 10:00:28 2024 +0100
@@ -79,6 +79,10 @@
     }
   }
 
+  void ResetGlobalContext()
+  {
+    globalContext_ = NULL;
+  }
 
   bool HasGlobalContext()
   {
@@ -1670,15 +1674,16 @@
       return true;
     }
 
+#ifdef _MSC_VER
+#define ORTHANC_SCANF sscanf_s
+#else
+#define ORTHANC_SCANF sscanf
+#endif
+
     // Parse the version
-    int aa, bb, cc;
-    if (
-#ifdef _MSC_VER
-      sscanf_s
-#else
-      sscanf
-#endif
-      (version, "%4d.%4d.%4d", &aa, &bb, &cc) != 3 ||
+    int aa, bb, cc = 0;
+    if ((ORTHANC_SCANF(version, "%4d.%4d.%4d", &aa, &bb, &cc) != 3 &&
+         ORTHANC_SCANF(version, "%4d.%4d", &aa, &bb) != 2) ||
       aa < 0 ||
       bb < 0 ||
       cc < 0)
@@ -3752,6 +3757,27 @@
 #endif
 
 
+#if ORTHANC_PLUGINS_VERSION_IS_ABOVE(1, 12, 1)
+  DicomInstance* DicomInstance::Load(const std::string& instanceId,
+                                     OrthancPluginLoadDicomInstanceMode mode)
+  {
+    OrthancPluginDicomInstance* instance = OrthancPluginLoadDicomInstance(
+      GetGlobalContext(), instanceId.c_str(), mode);
+
+    if (instance == NULL)
+    {
+      ORTHANC_PLUGINS_THROW_EXCEPTION(Plugin);
+    }
+    else
+    {
+      boost::movelib::unique_ptr<DicomInstance> result(new DicomInstance(instance));
+      result->toFree_ = true;
+      return result.release();
+    }
+  }
+#endif
+
+
 #if HAS_ORTHANC_PLUGIN_WEBDAV == 1
   static std::vector<std::string> WebDavConvertPath(uint32_t pathSize,
                                                     const char* const*  pathItems)
--- a/Resources/Orthanc/Plugins/OrthancPluginCppWrapper.h	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/Orthanc/Plugins/OrthancPluginCppWrapper.h	Mon Jan 22 10:00:28 2024 +0100
@@ -137,6 +137,8 @@
 
   void SetGlobalContext(OrthancPluginContext* context);
 
+  void ResetGlobalContext();
+
   bool HasGlobalContext();
 
   OrthancPluginContext* GetGlobalContext();
@@ -1264,6 +1266,11 @@
 
     ~DicomInstance();
 
+    const OrthancPluginDicomInstance* GetObject() const
+    {
+      return instance_;
+    }
+
     std::string GetRemoteAet() const;
 
     const void* GetBuffer() const
@@ -1318,6 +1325,11 @@
                                     size_t size,
                                     const std::string& transferSyntax);
 #endif
+
+#if ORTHANC_PLUGINS_VERSION_IS_ABOVE(1, 12, 1)
+    static DicomInstance* Load(const std::string& instanceId,
+                               OrthancPluginLoadDicomInstanceMode mode);
+#endif
   };
 
 // helper method to convert Http headers from the plugin SDK to a std::map
--- a/Resources/SyncOrthancFolder.py	Wed Sep 13 13:32:54 2023 +0200
+++ b/Resources/SyncOrthancFolder.py	Mon Jan 22 10:00:28 2024 +0100
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 
 #
 # This maintenance script updates the content of the "Orthanc" folder
@@ -19,7 +19,7 @@
 PLUGIN_SDK_VERSION_OLD = [ '0.9.5', '1.4.0', '1.5.2', '1.5.4' ]
 PLUGIN_SDK_VERSION_NEW = [ '1.9.2', '1.12.0' ]
 HAS_PROTOCOL_BUFFERS = [ '1.12.0' ]
-REPOSITORY = 'https://hg.orthanc-server.com/orthanc/raw-file'
+REPOSITORY = 'https://orthanc.uclouvain.be/hg/orthanc/raw-file'
 
 FILES = [
     ('default', 'OrthancFramework/Resources/CMake/AutoGeneratedCode.cmake', 'CMake'),
--- a/SQLite/CMakeLists.txt	Wed Sep 13 13:32:54 2023 +0200
+++ b/SQLite/CMakeLists.txt	Mon Jan 22 10:00:28 2024 +0100
@@ -97,6 +97,8 @@
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=1
   )
 
+DefineSourceBasenameForTarget(OrthancSQLiteIndex)
+
 install(
   TARGETS OrthancSQLiteIndex  # OrthancSQLiteStorage  TODO
   RUNTIME DESTINATION lib    # Destination for Windows
@@ -117,3 +119,5 @@
 set_target_properties(UnitTests PROPERTIES
   COMPILE_FLAGS -DORTHANC_ENABLE_LOGGING_PLUGIN=0
   )
+
+DefineSourceBasenameForTarget(UnitTests)
\ No newline at end of file
--- a/TODO	Wed Sep 13 13:32:54 2023 +0200
+++ b/TODO	Mon Jan 22 10:00:28 2024 +0100
@@ -22,7 +22,7 @@
 
 * Implement the "StoreDicom" option from the default filesystem
   storage area to run MySQL/PostgreSQL storage in index-only mode:
-  https://book.orthanc-server.com/contributing.html
+  https://orthanc.uclouvain.be/book/contributing.html
 
 * Performance: Add a pool of connections to PostgreSQL/MySQL, as
   already done in the database index plugins