diff --git a/.github/workflows/cbrain_ci.yaml b/.github/workflows/cbrain_ci.yaml
index 6495e1ec4..9f76043fb 100644
--- a/.github/workflows/cbrain_ci.yaml
+++ b/.github/workflows/cbrain_ci.yaml
@@ -24,7 +24,7 @@ jobs:
###########################################################
services:
mariadb:
- image: mariadb/server
+ image: mariadb
env: # the docker container's autosetup use MYSQL_ variables
MYSQL_ROOT_PASSWORD: that_is_nothing
MYSQL_DATABASE: cbrain_test
@@ -38,13 +38,13 @@ jobs:
###########################################################
- name: Checkout CBRAIN Codebase
- uses: actions/checkout@v2
+ uses: actions/checkout@v4
###########################################################
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
- ruby-version: 2.6.6
+ ruby-version: 2.7.2
###########################################################
- name: Setup BrainPortal And Bourreau Names
@@ -67,7 +67,7 @@ jobs:
###########################################################
- name: Reload Cached Gems
- uses: actions/cache@v1 # speeds up 'Prepare Ruby Gems' below
+ uses: actions/cache@v3 # speeds up 'Prepare Ruby Gems' below
with:
path: gem-cache
key: ${{ runner.os }}-gems-${{ hashFiles('*/Gemfile') }}
diff --git a/.travis.yml.old b/.travis.yml.old
deleted file mode 100644
index bf8e7cc23..000000000
--- a/.travis.yml.old
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# This used to be our Travis CI test script.
-# This has been replaced by GitHub actions.
-#
-services:
-- docker
-script:
-- bash Travis/travis_ci.sh
-notifications:
- slack:
- rooms:
- - secure: "gtJ4qanurja+/Ew0ChaeStjsZVC65nGC1uVPLEnZo05q0q8EsmMXpUCvnWSJu7xl42mrivOEmNns6xrrevGuKg0PiVZtKaj9njCGT2SRYZcvrAVZl17Q5pnNIFeRkmdiKp18sE071FKS11a8M4+K2nV1o6eupfOqNUWAWAmQAdI="
- - secure: "UEz9wB+m2nih2YnsBBLQMfGHBuYJ3WlsOy/IymWomog4ViBq7Q5UOW5RbtNukWhqJp8A/klieGIOcl2yTK3glyW3f04WDzg5zoVrTNVWsiwmRTa7L5D9+sw7/wlx9KE3x3ogYYgq77JkC33TQ9sOL8bMHYNbGVbiUuj+W08TsbSt8u7CR4XDIN3Fr3jpKXISoTxmG1rHyhlg1oO0Zb9h8IQ+8ySQo/6ecPhtyflv4C27p7RS3V+H9Ynu9kKd/gwsLtrcl61DxlD3LDipwYoCCQhrHX1CBD1Ti1h9ZoW1wwUuzxsdliWF+VQvNYSkTRV/yFmUmQRGxbzB3wkCmepOwgnhzU93TPxl02mzDjSkqYkh1JQ+/9JBHXiNQKamYWrNShJc3nnH98E7oeGvbHvbUdu3J+XT2JYbjTY4UdhCKlzw4ZifKpg2wIXXpH9bYKSo2LLJv4ocKaAUC2C+/BjKel/wd4FkYZix4Q58ISPfm51rytWJJvrbY7DODxcKmbVR3//8Jh+3/VFan+MOkH+AO7vjG7Gxym7jYwz2X9bElCMAR6NHH8EVeRSzzoSD8dYIQz8p/OSyFVN7CfR568+QisUhzzQ03TdCMwoNfGq97OKnWulXRKhqXSV3FktnOFJbJiHCf9cusDvZVApWqN7bEyO0bvstwwrRW7Zozcok9j8="
- - secure: "PI4Mo1Top2Nr4LFA19iHjK/abPhNKguXsbvcEEK8FLptzApeyorLb3/sgCXE6BRghHpK+lf02LkFNXaOvSG6myJrM5lP4XPpWBwCIfntpdbebzGet8pp6BZ19EO7tV75ckjPInBqQUs4Zo+SYxA65UJffCO094DSrReLCe6DTyFN/JGAZrV/AbK3g4bcw6ne6IIRa3r/o2tA96Pw9PJDzmAu7XwARZvmtsj/vg/UdwSsih7Hq1gDsQVCKfFfqQS2kUpF0HCVWZyghRbvEa26xH/hPR3ZNL4KLGf0GhFJV6aA19JhmdvvIpklZK1VuXrI/xO0Pt1en/GvSbRCD9Ngj1VY8aujelflmHIHJhAoJXwl25HLMHf+EYfMQYNdHXkARmL+rVLt1RqfEqmlfdBdbzlz2fP9VcZyugP12vqoTLIosNWXvxUVh1qyp63v3Kg1I6W4Vbm1F6ll8ChCR2uPVEebr7Px9eaa3Q8z6zb96a0DkPu1Q0AziJs3bu5gtufHjqdgqLfTjG/dROTeswMV7htOe8noLDMIO9qlS2qFJ6pb43aMFonMFHVjPDkRv9GWvTqQSdjAAcyF43ZN7Kizu6/LWrSaOF0D6ONQxa0KiMhtxVQc//walqN/InU3PNHIVXsGTqznffqLejVFcm2EVoxu/0abr1f+Tx2EthlBOgk="
diff --git a/Bourreau/Gemfile b/Bourreau/Gemfile
index cb0798fb2..3094bcea8 100644
--- a/Bourreau/Gemfile
+++ b/Bourreau/Gemfile
@@ -31,6 +31,7 @@ gem 'rails', '~> 5.0.6'
# Network components mostly
gem "net-ssh", :require => 'net/ssh'
gem "net-sftp", :require => 'net/sftp'
+gem "x25519"
gem "rbnacl"
gem "ed25519"
gem "rbnacl-libsodium"
@@ -70,7 +71,8 @@ end
group :test do
gem "rspec"
gem "rspec-rails"
- gem "factory_bot_rails", :require => false
+ gem "factory_bot", '6.4.2', :require => false
+ gem "factory_bot_rails", '6.4.2', :require => false
gem "rails-controller-testing"
gem "rspec-mocks"
gem "rspec-activemodel-mocks"
diff --git a/Bourreau/app/models/bourreau_worker.rb b/Bourreau/app/models/bourreau_worker.rb
index 231c0564c..f37789965 100644
--- a/Bourreau/app/models/bourreau_worker.rb
+++ b/Bourreau/app/models/bourreau_worker.rb
@@ -212,7 +212,9 @@ def process_task_list(tasks_todo_rel) #:nodoc:
user_max_tasks = @rr.meta["task_limit_user_#{user_id}".to_sym]
user_max_tasks = @rr.meta[:task_limit_user_default] if user_max_tasks.blank?
user_max_tasks = user_max_tasks.to_i # nil, "" and "0" means unlimited
- user_tasks = by_user[user_id].shuffle # go through tasks in random order
+ # Go through tasks in random order, but with non-New states having higher priority
+ user_tasks = (by_user[user_id].select { |t| t.status == 'New' }).shuffle +
+ (by_user[user_id].select { |t| t.status != 'New' }).shuffle # tasks are pop()ed
# Loop for each task
while user_tasks.size > 0
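
A minimal sketch (not part of the patch) of the queue ordering introduced above, using a hypothetical Struct-based task: partition is an equivalent one-pass form of the two selects, and since the worker pop()s tasks from the end of the array, the non-New tasks are consumed first.

    Task  = Struct.new(:status)
    tasks = [Task.new('New'), Task.new('Queued'), Task.new('Data Ready')]
    new_tasks, other_tasks = tasks.partition { |t| t.status == 'New' }
    user_tasks = new_tasks.shuffle + other_tasks.shuffle
    user_tasks.pop.status # => "Queued" or "Data Ready"; "New" only once the others are gone
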
diff --git a/Bourreau/app/models/squashifier_en_cbrain_ssh_data_provider.rb b/Bourreau/app/models/squashifier_en_cbrain_ssh_data_provider.rb
new file mode 120000
index 000000000..59016b55a
--- /dev/null
+++ b/Bourreau/app/models/squashifier_en_cbrain_ssh_data_provider.rb
@@ -0,0 +1 @@
+../../../BrainPortal/app/models/squashifier_en_cbrain_ssh_data_provider.rb
\ No newline at end of file
diff --git a/Bourreau/config/initializers/validation_bourreau.rb b/Bourreau/config/initializers/validation_bourreau.rb
index a43888a18..6cce6f35a 100644
--- a/Bourreau/config/initializers/validation_bourreau.rb
+++ b/Bourreau/config/initializers/validation_bourreau.rb
@@ -52,7 +52,10 @@
puts "C> \t environment variable 'CBRAIN_SKIP_VALIDATIONS' to '1'.\n"
CbrainSystemChecks.check(:all)
BourreauSystemChecks.check([
- :a050_ensure_proper_cluster_management_layer_is_loaded, :z000_ensure_we_have_a_forwarded_ssh_agent,
+ :a000_ensure_models_are_preloaded,
+ :a005_ensure_boutiques_descriptors_are_loaded,
+ :a050_ensure_proper_cluster_management_layer_is_loaded,
+ :z000_ensure_we_have_a_forwarded_ssh_agent,
])
end
Process.setproctitle "CBRAIN Console #{RemoteResource.current_resource.class} #{RemoteResource.current_resource.name} #{CBRAIN::Instance_Name}"
diff --git a/Bourreau/lib/bourreau_system_checks.rb b/Bourreau/lib/bourreau_system_checks.rb
index 7b13161ac..b0d243c70 100644
--- a/Bourreau/lib/bourreau_system_checks.rb
+++ b/Bourreau/lib/bourreau_system_checks.rb
@@ -484,6 +484,23 @@ def self.a100_ensure_dp_cache_symlink_exists #:nodoc:
+  def self.a110_ensure_task_class_git_commits_cached #:nodoc:
+
+ #----------------------------------------------------------------------------
+    puts "C> Ensuring git commits for task classes are pre-cached..."
+ #----------------------------------------------------------------------------
+
+ myself = RemoteResource.current_resource
+
+ ToolConfig.where(:bourreau_id => myself.id)
+ .map {|tc| tc.cbrain_task_class rescue nil}
+ .uniq
+ .compact # to remove the nil
+ .each { |klass| klass.revision_info.self_update }
+ end
+
+
+
def self.z000_ensure_we_have_a_forwarded_ssh_agent #:nodoc:
#----------------------------------------------------------------------------
@@ -501,4 +518,3 @@ def self.z000_ensure_we_have_a_forwarded_ssh_agent #:nodoc:
end
end
-
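
For reference, the map/uniq/compact chain in the new check tolerates tool configs whose task class no longer resolves: the inline rescue turns those into nil, uniq collapses duplicates, and compact drops the nils before each surviving class refreshes its revision info. A standalone illustration, with constantize standing in for cbrain_task_class:

    klasses = ['String', 'NoSuchTask', 'String'].map { |n| n.constantize rescue nil }
    # => [String, nil, String]
    klasses.uniq.compact
    # => [String]
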
diff --git a/Bourreau/lib/boutiques_collection_basenames_list_maker.rb b/Bourreau/lib/boutiques_collection_basenames_list_maker.rb
new file mode 120000
index 000000000..0d8942f80
--- /dev/null
+++ b/Bourreau/lib/boutiques_collection_basenames_list_maker.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/boutiques_collection_basenames_list_maker.rb
\ No newline at end of file
diff --git a/Bourreau/lib/boutiques_ext3_capturer.rb b/Bourreau/lib/boutiques_ext3_capturer.rb
new file mode 120000
index 000000000..d33b38e1c
--- /dev/null
+++ b/Bourreau/lib/boutiques_ext3_capturer.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/boutiques_ext3_capturer.rb
\ No newline at end of file
diff --git a/Bourreau/lib/boutiques_input_value_fixer.rb b/Bourreau/lib/boutiques_input_value_fixer.rb
new file mode 120000
index 000000000..153073c7c
--- /dev/null
+++ b/Bourreau/lib/boutiques_input_value_fixer.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/boutiques_input_value_fixer.rb
\ No newline at end of file
diff --git a/Bourreau/lib/boutiques_save_std_out_std_err.rb b/Bourreau/lib/boutiques_save_std_out_std_err.rb
new file mode 120000
index 000000000..7336628e0
--- /dev/null
+++ b/Bourreau/lib/boutiques_save_std_out_std_err.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/boutiques_save_std_out_std_err.rb
\ No newline at end of file
diff --git a/Bourreau/lib/boutiques_task_logs_copier.rb b/Bourreau/lib/boutiques_task_logs_copier.rb
new file mode 120000
index 000000000..b84290e0c
--- /dev/null
+++ b/Bourreau/lib/boutiques_task_logs_copier.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/boutiques_task_logs_copier.rb
\ No newline at end of file
diff --git a/Bourreau/lib/data_provider_test_connection_error.rb b/Bourreau/lib/data_provider_test_connection_error.rb
new file mode 120000
index 000000000..ee01c5c55
--- /dev/null
+++ b/Bourreau/lib/data_provider_test_connection_error.rb
@@ -0,0 +1 @@
+../../BrainPortal/lib/data_provider_test_connection_error.rb
\ No newline at end of file
diff --git a/BrainPortal/Gemfile b/BrainPortal/Gemfile
index 58571094f..95c062d97 100644
--- a/BrainPortal/Gemfile
+++ b/BrainPortal/Gemfile
@@ -31,10 +31,13 @@ gem 'rails', '~> 5.0.6'
# Network components mostly
gem "net-ssh", :require => 'net/ssh'
gem "net-sftp", :require => 'net/sftp'
-gem "rbnacl"
+gem "x25519"
gem "ed25519"
-gem "rbnacl-libsodium"
gem "bcrypt_pbkdf"
+# You can comment out the following two gems if you can live without 'userkey' data providers.
+# They are usually the trickiest to compile and integrate into the app.
+gem "rbnacl"
+gem "rbnacl-libsodium"
# Other utilities
gem "sys-proctable", '~> 1.2.0'
@@ -46,6 +49,7 @@ gem "json-schema"
gem "aws-sdk-s3", '~> 1'
gem 'zenodo_client', '>= 1.0.0', :git => 'https://github.com/aces/zenodo-client-gem'
gem 'jwt'
+gem "rack-cors"
# User interface gems
gem "will_paginate"
@@ -65,7 +69,7 @@ gem "activemodel-serializers-xml"
gem 'sassc-rails' # "rake assets:precompile" fails if this gems is put into the :assets group
group :assets do
- gem 'therubyracer'
+ #gem 'therubyracer' # very old; consider installing ANY other JS engine, like 'node'. If not, try 'mini_racer'.
gem 'coffee-rails'
gem 'uglifier'
end
@@ -90,7 +94,8 @@ end
group :test do
gem "rspec"
gem "rspec-rails"
- gem "factory_bot_rails", :require => false
+ gem "factory_bot", '6.4.2', :require => false
+ gem "factory_bot_rails", '6.4.2', :require => false
gem "rails-controller-testing"
gem "rspec-mocks"
gem "rspec-activemodel-mocks"
diff --git a/BrainPortal/app/assets/stylesheets/cbrain.css.erb b/BrainPortal/app/assets/stylesheets/cbrain.css.erb
index cc28238fb..b925a5dd2 100644
--- a/BrainPortal/app/assets/stylesheets/cbrain.css.erb
+++ b/BrainPortal/app/assets/stylesheets/cbrain.css.erb
@@ -3,7 +3,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -441,9 +441,9 @@ table.bordered {
table.public_tools_list {
border: 0px;
- margin-left: auto;
- margin-right: auto;
- margin-bottom: 2em;
+ display: table-cell;
+ padding-bottom: 1em;
+ padding-right: 1em;
}
table.public_tools_list tr {
@@ -458,7 +458,7 @@ table.public_tools_list th {
table.public_tools_list td {
border: 0px;
border-bottom: 0.2em solid #999;
- max-width: 60em;
+ max-width: 30em;
}
@@ -804,6 +804,10 @@ pre {
line-height: 1.3em;
border: 0.2em solid black;
padding: 0.4em;
+ word-break: break-all;
+ overflow-wrap: break-word;
+ text-align: left;
+ table-layout: fixed;
}
.script_preview {
@@ -880,7 +884,7 @@ pre {
}
.errorExplanation {
- width: 400px;
+ width: 600px;
border: 2px solid red;
padding: 7px;
padding-bottom: 12px;
@@ -1138,18 +1142,16 @@ pre {
float: left;
text-align: center;
text-decoration: none;
- color: white;
padding: 0.4em;
margin: 2em ;
width: 18em;
height: 9em;
- border: 0.4em gray solid;
+ border: 0.5em black solid;
border-radius: 1em;
cursor: pointer;
}
-.project_button h4{
- color: white;
+.project_button h4 {
font-weight: bold;
border-bottom: none;
margin-bottom: 0;
@@ -1163,13 +1165,24 @@ pre {
.project_button_bottom_link {
font-size: 10px;
- color: white;
font-weight: bold;
text-decoration: none;
position: absolute;
bottom: 1px;
}
+.project_user_count {
+ color: #660000;
+}
+
+.project_files_count {
+ color: #006600;
+}
+
+.project_tasks_count {
+ color: #000066;
+}
+
.project_button_description {
font-style: italic;
line-height: 1.2em;
@@ -1180,48 +1193,30 @@ pre {
}
.project_edit_button {
- left: 5px;
+ left: 1em;
}
.project_delete_button {
- right: 5px;
+ right: 1em;
}
-.system_project_point { color: #d41c1c; }
-.system_project { background: #d41c1c; }
-.system_project:active { background: #d41c1c; }
-
-.everyone_project_point { color: #af0b0b; }
-.everyone_project { background: #af0b0b; }
-.everyone_project:active { background: #af0b0b; }
-
-.site_project_point { color: #8c0953; }
-.site_project { background: #8c0953; }
-.site_project:active { background: #8c0953; }
+.everyone_project { background: #bbbbbb; }
+.everyone_project:active { background: #bbbbbb; }
-.user_project_point { color: #500d75; }
-.user_project { background: #500d75; }
-.user_project:active { background: #500d75; }
+.site_project { background: #dbf7cc; }
+.site_project:active { background: #dbf7cc; }
-.shared_project_point { color: #00bf09; }
-.shared_project { background: #00bf09; }
-.shared_project:active { background: #00bf09; }
+.user_project { background: #bbbbbb; }
+.user_project:active { background: #bbbbbb; }
-.public_project_point { color: #9e5400; }
-.public_project { background: #9e5400; }
-.public_project:active { background: #9e5400; }
+.shared_project { background: #A5D1DF; }
+.shared_project:active { background: #A5D1DF; }
-.personal_project_point { color: #008686; }
-.personal_project { background: #008686; }
-.personal_project:active { background: #008686; }
+.public_project { background: #fdcece; }
+.public_project:active { background: #fdcece; }
-.invisible_project_point { color: #2b97c1; }
-.invisible_project { background: #2b97c1; }
-.invisible_project:active { background: #2b97c1; }
-
-.empty_project_point { color: #d7ca0a; }
-.empty_project { background: #d7ca0a; }
-.empty_project:active { background: #d7ca0a; }
+.private_project { background: #eeeeee; }
+.private_project:active { background: #eeeeee; }
.giant {
margin-top: 0.3em;
@@ -2460,6 +2455,7 @@ img {
color: black;
padding: 5px;
border: 1px solid black;
+ max-width: 42em;
}
.white_bg {
@@ -2623,6 +2619,26 @@ img {
display: table-cell;
}
+/* % ######################################################### */
+/* % Disk Quota Styles */
+/* % ######################################################### */
+
+.disk_quota_user_quota_highlight {
+ background-color: #ffd; /* light yellow */
+}
+
+.disk_quota_dp_quota_highlight {
+ background-color: #ffd; /* light yellow */
+}
+
+.disk_quota_exceed_bytes {
+ background-color: #fdd; /* light pink */
+}
+
+.disk_quota_exceed_files {
+ background-color: #fdd; /* light pink */
+}
+
/* % ######################################################### */
/* % Report Generator Styles */
/* % ######################################################### */
diff --git a/BrainPortal/app/assets/stylesheets/neurohub.scss.erb b/BrainPortal/app/assets/stylesheets/neurohub.scss.erb
index 1010e4ae1..ef6aeab25 100644
--- a/BrainPortal/app/assets/stylesheets/neurohub.scss.erb
+++ b/BrainPortal/app/assets/stylesheets/neurohub.scss.erb
@@ -3220,6 +3220,7 @@ $DASHBOARD_CARD_MIN_WIDTH: 25;
border: 1px solid $DEFAULT_ALT;
border-radius: 2px;
overflow: scroll;
+ box-sizing: border-box;
}
.license-list {
background: $PRIMARY_WASH;
diff --git a/BrainPortal/app/controllers/data_providers_controller.rb b/BrainPortal/app/controllers/data_providers_controller.rb
index e1113e722..24cd5e3f7 100644
--- a/BrainPortal/app/controllers/data_providers_controller.rb
+++ b/BrainPortal/app/controllers/data_providers_controller.rb
@@ -2,7 +2,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -28,11 +28,12 @@ class DataProvidersController < ApplicationController
Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
api_available :only => [ :index, :show, :is_alive,
- :browse, :register, :unregister, :delete ]
+ :browse, :register, :unregister, :delete,
+ :create_personal, :check_personal, ]
before_action :login_required
before_action :manager_role_required, :only => [:new, :create]
- before_action :admin_role_required, :only => [:report, :repair]
+ before_action :admin_role_required, :only => [:new, :create, :report, :repair]
def index #:nodoc:
@scope = scope_from_session
@@ -61,7 +62,6 @@ def index #:nodoc:
def show #:nodoc:
data_provider_id = params[:id]
@provider = DataProvider.find(data_provider_id)
-
cb_notice "Provider not accessible by current user." unless @provider.can_be_accessed_by?(current_user)
respond_to do |format|
@@ -86,8 +86,8 @@ def new #:nodoc:
@typelist = get_type_list
end
- def create #:nodoc:
- @provider = DataProvider.sti_new(data_provider_params)
+ def create #:nodoc:
+ @provider = DataProvider.sti_new(data_provider_params)
@provider.user_id ||= current_user.id # disabled field in form DOES NOT send value!
@provider.group_id ||= current_assignable_group.id
@@ -110,6 +110,49 @@ def create #:nodoc:
end
end
+ def new_personal #:nodoc:
+ provider_group_id = current_assignable_group.id
+ @provider = UserkeyFlatDirSshDataProvider.new( :user_id => current_user.id,
+ :group_id => provider_group_id,
+ :online => true,
+ :read_only => false
+ )
+ @groups = current_user.assignable_groups
+ end
+
+  # Create action for normal users; only UserkeyFlatDirSshDataProvider is supported
+ def create_personal
+ normal_params = params.require_as_params(:data_provider)
+ .permit(:name, :description, :group_id,
+ :remote_user, :remote_host,
+ :remote_port, :remote_dir
+ )
+ group_id = normal_params[:group_id]
+    current_user.assignable_group_ids.find(group_id) # ensure the group is assignable; visibility may need further checking
+ @provider = UserkeyFlatDirSshDataProvider.new(normal_params)
+ @provider.user_id = current_user.id # prevent creation of dp on behalf of other users
+
+ if ! @provider.save
+ @groups = current_user.assignable_groups
+ respond_to do |format|
+ format.html { render :action => :new_personal}
+ format.json { render :json => @provider.errors, :status => :unprocessable_entity }
+ end
+ return
+ end
+
+ @provider.addlog_context(self, "Created by #{current_user.login}")
+ @provider.meta[:browse_gid] = current_user.own_group.id
+ flash[:notice] = "Provider successfully created. Please click the Test Configuration button."\
+ " This will run tests on the current storage configuration. Note that if these tests fail,"\
+ " the storage will be marked 'offline'."
+
+ respond_to do |format|
+ format.html { redirect_to :action => :show, :id => @provider.id}
+ format.json { render :json => @provider }
+ end
+ end
+
def update #:nodoc:
@user = current_user
id = params[:id]
@@ -125,35 +168,29 @@ def update #:nodoc:
return
end
- new_data_provider_attr = data_provider_params
+    # Hacking prevention: this guarantees that users cannot
+    # change the group to one that is not assignable to them.
+ group_id = params[:group_id]
+ current_user.assignable_group_ids.find(group_id) if ! current_user.has_role?(:admin_user)
+
+ new_data_provider_attr = data_provider_params(@provider)
new_data_provider_attr.delete :type # Type cannot be updated once it is set.
# Fields that stay the same if the form provides a blank entry:
new_data_provider_attr.delete :cloud_storage_client_token if new_data_provider_attr[:cloud_storage_client_token].blank?
- if @provider.update_attributes_with_logging(new_data_provider_attr, current_user,
- %w(
- remote_user remote_host remote_port remote_dir
- not_syncable cloud_storage_client_identifier cloud_storage_client_token
- cloud_storage_client_bucket_name cloud_storage_client_path_start
- cloud_storage_endpoint cloud_storage_region
- datalad_repository_url datalad_relative_path
- containerized_path
- )
- )
+ if @provider.update_attributes_with_logging(new_data_provider_attr, current_user, @provider.attributes.keys)
meta_flags_for_restrictions = (params[:meta] || {}).keys.grep(/\Adp_no_copy_\d+\z|\Arr_no_sync_\d+\z/)
add_meta_data_from_form(@provider, [:must_move, :no_uploads, :no_viewers, :browse_gid] + meta_flags_for_restrictions)
flash[:notice] = "Provider successfully updated."
respond_to do |format|
format.html { redirect_to :action => :show }
- format.xml { render :xml => @provider }
- format.json { render :json => @provider }
+ format.json { render :json => @provider }
end
else
@provider.reload
respond_to do |format|
format.html { render :action => 'show' }
- format.xml { render :xml => @provider.errors, :status => :unprocessable_entity }
format.json { render :json => @provider.errors, :status => :unprocessable_entity }
end
end
@@ -819,9 +856,59 @@ def repair
end
end
+  # This action checks that the remote side of an SSH DataProvider is
+  # accessible using SSH. Regretfully, it does not guarantee that a connection is possible.
+  # If the check fails, it raises an exception of class DataProviderTestConnectionError.
+ def check_personal
+ id = params[:id]
+ @provider = DataProvider.find(id)
+ unless @provider.has_owner_access?(current_user)
+ flash[:error] = "You cannot check a provider that you do not own."
+ respond_to do |format|
+ format.html { redirect_to :action => :show }
+ format.xml { head :forbidden }
+ format.json { head :forbidden }
+ end
+ return
+ end
+
+ unless @provider.is_a? SshDataProvider
+      flash[:error] = "Presently, a detailed check is only available for SSH providers."
+ respond_to do |format|
+ format.html { redirect_to :action => :show }
+ format.xml { head :forbidden }
+ format.json { head :forbidden }
+ end
+ return
+ end
+
+ # Do active checks of the connection. Will
+ # raise DataProviderTestConnectionError if anything is wrong.
+ @provider.check_connection!
+
+ # Ok, all is well.
+ @provider.update_column(:online, true)
+ flash[:notice] = "The configuration was tested and seems to be operational."
+
+ respond_to do |format|
+ format.html { redirect_to :action => :show }
+ format.json { render :json => 'ok' }
+ end
+
+ rescue DataProviderTestConnectionError => ex
+ flash[:error] = ex.message
+    flash[:error] += "\nThis storage is marked as 'offline' until this test passes."
+ @provider.update_column(:online, false)
+
+ respond_to do |format|
+ format.html { redirect_to :action => :show }
+ format.json { render :json => 'not ok' }
+ end
+ end
+
private
- def data_provider_params #:nodoc:
+ def data_provider_params(for_data_provider=nil) #:nodoc:
if current_user.has_role?(:admin_user)
params.require_as_params(:data_provider).permit(
:name, :user_id, :group_id, :remote_user, :remote_host, :alternate_host,
@@ -836,17 +923,15 @@ def data_provider_params #:nodoc:
:license_agreements,
:containerized_path
)
- else
- # Normal users are not allowed to change
- # some parameters that would allow them to access things
- # they don't control.
+ elsif for_data_provider.is_a?(UserkeyFlatDirSshDataProvider)
params.require_as_params(:data_provider).permit(
- :name, :description, :group_id, :time_zone,
- :alternate_host,
+ :name, :description, :group_id,
+ :remote_user, :remote_host, :remote_port, :remote_dir,
:online, :read_only, :not_syncable,
- :datalad_repository_url, :datalad_relative_path,
- :license_agreements,
- :containerized_path
+ )
+    else # place for future expansion; be careful not to introduce security bugs
+ params.require_as_params(:data_provider).permit(
+ :description,
)
end
end
@@ -855,6 +940,8 @@ def get_type_list #:nodoc:
data_provider_list = [ "FlatDirSshDataProvider" ]
if check_role(:site_manager) || check_role(:admin_user)
data_provider_list = DataProvider.descendants.map(&:name)
+ data_provider_list.delete(UserkeyFlatDirSshDataProvider.name) # this type is for regular users
+ # not for admins
end
grouped_options = data_provider_list.to_a.hashed_partitions { |name| name.constantize.pretty_category_name }
grouped_options.delete(nil) # data providers that can not be on this list return a category name of nil, so we remove them
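
The three-way triage in data_provider_params above leans on standard strong-parameters behavior: anything absent from the permit list is silently dropped, so a normal user posting admin-only attributes simply loses them. A minimal sketch with the stock require/permit API (the controller itself goes through CBRAIN's require_as_params variant):

    require 'action_controller'
    raw = ActionController::Parameters.new(
      :data_provider => { :description => 'my storage', :remote_host => 'evil.example.com' }
    )
    raw.require(:data_provider).permit(:description).to_h
    # => { "description" => "my storage" } -- :remote_host is silently discarded
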
diff --git a/BrainPortal/app/controllers/disk_quotas_controller.rb b/BrainPortal/app/controllers/disk_quotas_controller.rb
index 9eb9117b1..c111f425d 100644
--- a/BrainPortal/app/controllers/disk_quotas_controller.rb
+++ b/BrainPortal/app/controllers/disk_quotas_controller.rb
@@ -124,6 +124,56 @@ def destroy #:nodoc:
redirect_to disk_quotas_path
end
+ # Returns a list of users with exceeded quotas
+ def report #:nodoc:
+ quota_to_user_ids = {} # quota_obj => [uid, uid...]
+
+ # Scan DP-wide quota objects
+ DiskQuota.where(:user_id => 0).all.each do |quota|
+ exceed_size_user_ids = Userfile
+ .where(:data_provider_id => quota.data_provider_id)
+ .group(:user_id)
+ .sum(:size)
+ .select { |user_id,size| size >= quota.max_bytes }
+ .keys
+ exceed_numfiles_user_ids = Userfile
+ .where(:data_provider_id => quota.data_provider_id)
+ .group(:user_id)
+ .sum(:num_files)
+ .select { |user_id,num_files| num_files >= quota.max_files }
+ .keys
+ union_ids = exceed_size_user_ids | exceed_numfiles_user_ids
+ union_ids -= DiskQuota
+ .where(:data_provider_id => quota.data_provider_id, :user_id => union_ids)
+ .pluck(:user_id) # remove user IDs that have their own quota records
+ quota_to_user_ids[quota] = union_ids if union_ids.size > 0
+ end
+
+ # Scan user-specific quota objects
+ DiskQuota.where('user_id > 0').all.each do |quota|
+ quota_to_user_ids[quota] = [ quota.user_id ] if quota.exceeded?
+ end
+
+ # Inverse relation: user_id => [ quota, quota ]
+ user_id_to_quotas = {}
+ quota_to_user_ids.each do |quota,user_ids|
+ user_ids.each do |user_id|
+ user_id_to_quotas[user_id] ||= []
+ user_id_to_quotas[user_id] << quota
+ end
+ end
+
+ # Table content: [ [ user_id, quota ], [user_id, quota] ... ]
+ # Note: the rows are grouped by user_id, but not sorted in any way...
+ @user_id_and_quota = []
+ user_id_to_quotas.each do |user_id, quotas|
+ quotas.each do |quota|
+ @user_id_and_quota << [ user_id, quota ]
+ end
+ end
+
+ end
+
private
def disk_quota_params #:nodoc:
@@ -145,10 +195,11 @@ def base_scope #:nodoc:
scope
end
- # Tries to turn strings like '3 mb' into 3_000_000 etc
- # Supported suffixes are T, G, M, K, TB, GB, MB, KB, B (case insensitive)
+ # Tries to turn strings like '3 mb' into 3_000_000 etc.
+ # Supported suffixes are T, G, M, K, TB, GB, MB, KB, B (case insensitive).
+  # Negative values are parsed, but the DiskQuota model only accepts the special value -1.
def guess_size_units(sizestring)
- match = sizestring.match /\A\s*(\d*\.?\d+)\s*([tgmk]?)\s*b?\s*\z/i
+ match = sizestring.match(/\A\s*(-?\d*\.?\d+)\s*([tgmk]?)\s*b?\s*\z/i)
return "" unless match # parsing error
number = match[1]
suffix = match[2].presence&.downcase || 'u'
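
The regexp above captures an optional sign, a decimal number, and an optional T/G/M/K multiplier followed by an optional 'b'. A plain-Ruby sketch of the conversion it feeds, assuming the decimal powers of 1000 implied by the '3 mb' -> 3_000_000 comment:

    MULT = { 't' => 10**12, 'g' => 10**9, 'm' => 10**6, 'k' => 10**3, 'u' => 1 }
    def parse_size(sizestring)
      match = sizestring.match(/\A\s*(-?\d*\.?\d+)\s*([tgmk]?)\s*b?\s*\z/i)
      return nil unless match              # parsing error
      suffix = match[2].downcase
      suffix = 'u' if suffix.empty?        # bare number, no multiplier
      (match[1].to_f * MULT[suffix]).to_i
    end
    parse_size('3 mb') # => 3000000
    parse_size('-1')   # => -1, the only negative value the DiskQuota model accepts
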
diff --git a/BrainPortal/app/controllers/groups_controller.rb b/BrainPortal/app/controllers/groups_controller.rb
index 7ee6f2c33..57fc7c093 100644
--- a/BrainPortal/app/controllers/groups_controller.rb
+++ b/BrainPortal/app/controllers/groups_controller.rb
@@ -48,21 +48,22 @@ def index #:nodoc:
@scope.custom[:button] = true if
current_user.has_role?(:normal_user) && @scope.custom[:button].nil?
+ view_mode = (@scope.custom[:button].present?) ? :button : :list
+
@base_scope = current_user.listable_groups.includes(:site)
@view_scope = @scope.apply(@base_scope)
- @scope.pagination ||= Scope::Pagination.from_hash({ :per_page => 50 })
- @groups = @scope.pagination.apply(@view_scope)
- @groups = (@groups.to_a << 'ALL') if @scope.custom[:button]
+ if view_mode == :list
+ @scope.pagination ||= Scope::Pagination.from_hash({ :per_page => 50 })
+ @groups = @scope.pagination.apply(@view_scope)
+ else
+ @groups = @view_scope.to_a
+ end
# For regular groups
@group_id_2_userfile_counts = Userfile.find_all_accessible_by_user(current_user, :access_requested => :read).group("group_id").count
@group_id_2_task_counts = CbrainTask.find_all_accessible_by_user(current_user).group("group_id").count
@group_id_2_user_counts = User.joins(:groups).group("group_id").count.convert_keys!(&:to_i) # .joins make keys as string
- @group_id_2_tool_counts = Tool.find_all_accessible_by_user(current_user).group("group_id").count
- @group_id_2_data_provider_counts = DataProvider.find_all_accessible_by_user(current_user).group("group_id").count
- @group_id_2_bourreau_counts = Bourreau.find_all_accessible_by_user(current_user).group("group_id").count
- @group_id_2_brain_portal_counts = BrainPortal.find_all_accessible_by_user(current_user).group("group_id").count
# For `ALL` group
@group_id_2_userfile_counts[nil] = Userfile.find_all_accessible_by_user(current_user, :access_requested => :read).count
@@ -275,12 +276,12 @@ def group_params #:nodoc:
params.require_as_params(:group).permit(
:name, :description, :not_assignable,
:site_id, :creator_id, :invisible, :track_usage,
- :user_ids => []
+ :public, :user_ids => []
)
else # non admin users
params.require_as_params(:group).permit(
:name, :description, :not_assignable,
- :user_ids => []
+ :public, :user_ids => []
)
end
end
diff --git a/BrainPortal/app/controllers/nh_loris_hooks_controller.rb b/BrainPortal/app/controllers/nh_loris_hooks_controller.rb
index 66d4a9867..9679b9990 100644
--- a/BrainPortal/app/controllers/nh_loris_hooks_controller.rb
+++ b/BrainPortal/app/controllers/nh_loris_hooks_controller.rb
@@ -64,6 +64,11 @@ def file_list_maker
s_group = Group.where_id_or_name(source_group_id).first if source_group_id
base = Userfile.where(nil) # As seen in userfiles_controller
base = Userfile.restrict_access_on_query(current_user, base, :access_requested => :read)
+
+ extended_values = ExtendedCbrainFileList.roots_to_fullpaths(source_basenames)
+ source_basenames = extended_values.keys
+ is_extended = extended_values.any?{|parent_dir,pathlist| pathlist.present? }
+
userfiles = base.where(:name => source_basenames)
userfiles = userfiles.where(:data_provider_id => s_dp.id) if s_dp
userfiles = userfiles.where(:group_id => s_group.id) if s_group
@@ -71,17 +76,22 @@ def file_list_maker
# It is an error not to find exactly the same number of files as in
# the params' basenames array
found_names = userfiles.pluck(:name)
- file_count = found_names.size
- exp_count = source_basenames.size
+ file_count = found_names.size
+ exp_count = source_basenames.size
if (file_count == 0) || (strict_match && (file_count != exp_count))
cb_error "Could not find an exact match for the files. Found #{file_count} of #{exp_count} files"
end
- # Create CbrainFileList content and save it to DP
- cblist_content = CbrainFileList.create_csv_file_from_userfiles(userfiles)
-
- # Save result file
- result = create_file_for_request(CbrainFileList, "Loris-DQT-List.cbcsv", cblist_content)
+ result = nil
+ if is_extended
+ new_extended = extended_values.transform_values {|relpaths| { :all_to_keep => relpaths }.to_json }
+ userfiles = ExtendedCbrainFileList.extended_userfiles_by_name(userfiles, new_extended)
+ cblist_content = ExtendedCbrainFileList.create_csv_file_from_userfiles(userfiles)
+ result = create_file_for_request(ExtendedCbrainFileList, "Extended-Loris-DQT-List.cbcsv", cblist_content)
+ else
+ cblist_content = CbrainFileList.create_csv_file_from_userfiles(userfiles)
+ result = create_file_for_request(CbrainFileList, "Loris-DQT-List.cbcsv", cblist_content)
+ end
# Info message and unmatched entries
extra_response = {
diff --git a/BrainPortal/app/controllers/nh_storages_controller.rb b/BrainPortal/app/controllers/nh_storages_controller.rb
index ecda963b8..be5281ec1 100644
--- a/BrainPortal/app/controllers/nh_storages_controller.rb
+++ b/BrainPortal/app/controllers/nh_storages_controller.rb
@@ -29,9 +29,6 @@ class NhStoragesController < NeurohubApplicationController
before_action :login_required
- # A private exception class when testing connectivity
- class UserKeyTestConnectionError < RuntimeError ; end
-
def new #:nodoc:
@nh_dp = UserkeyFlatDirSshDataProvider.new
@nh_projects = find_nh_projects(current_user)
@@ -204,70 +201,19 @@ def check
@nh_dp.update_column(:online, true)
- master = @nh_dp.master # This is a handler for the connection, not persistent.
- tmpfile = "/tmp/dp_check.#{Process.pid}.#{rand(1000000)}"
-
- # Check #1: the SSH connection can be established
- if ! master.is_alive?
- test_error "Cannot establish the SSH connection. Check the configuration: username, hostname, port are valid, and SSH key is installed."
- end
-
- # Check #2: we can run "true" on the remote site and get no output
- status = master.remote_shell_command_reader("true",
- :stdin => "/dev/null",
- :stdout => "#{tmpfile}.out",
- :stderr => "#{tmpfile}.err",
- )
- stdout = File.read("#{tmpfile}.out") rescue "Error capturing stdout"
- stderr = File.read("#{tmpfile}.err") rescue "Error capturing stderr"
- if stdout.size != 0
- stdout.strip! if stdout.present? # just to make it pretty while still reporting whitespace-only strings
- test_error "Remote shell is not clean: got some bytes on stdout: '#{stdout}'"
- end
- if stderr.size != 0
- stderr.strip! if stdout.present?
- test_error "Remote shell is not clean: got some bytes on stderr: '#{stderr}'"
- end
- if ! status
- test_error "Got non-zero return code when trying to run 'true' on remote side."
- end
-
- # Check #3: the remote directory exists
- master.remote_shell_command_reader "test -d #{@nh_dp.remote_dir.bash_escape} && echo DIR-OK", :stdout => tmpfile
- out = File.read(tmpfile)
- if out != "DIR-OK\n"
- test_error "The remote directory doesn't seem to exist."
- end
-
- # Check #4: the remote directory is readable
- master.remote_shell_command_reader "test -r #{@nh_dp.remote_dir.bash_escape} && test -x #{@nh_dp.remote_dir.bash_escape} && echo DIR-READ", :stdout => tmpfile
- out = File.read(tmpfile)
- if out != "DIR-READ\n"
- test_error "The remote directory doesn't seem to be readable"
- end
+ # Performs an active check of the connection; will
+ # raise DataProviderTestConnectionError if something is wrong.
+ @nh_dp.check_connection!
# Ok, all is well.
flash[:notice] = "The configuration was tested and seems to be operational."
redirect_to :action => :show
- rescue UserKeyTestConnectionError => ex
+ rescue DataProviderTestConnectionError => ex
flash[:error] = ex.message
flash[:error] += "\nThis storage is marked as 'offline' until this test pass."
@nh_dp.update_column(:online, false)
redirect_to :action => :show
-
- ensure
- File.unlink "#{tmpfile}.out" rescue true
- File.unlink "#{tmpfile}.err" rescue true
-
- end
-
- private
-
- # Utility method to raise an exception
- # when testing for a DP's configuration.
- def test_error(message) #:nodoc:
- raise UserKeyTestConnectionError.new(message)
end
end
diff --git a/BrainPortal/app/controllers/noc_controller.rb b/BrainPortal/app/controllers/noc_controller.rb
index 4c84835a3..6414c4dd9 100644
--- a/BrainPortal/app/controllers/noc_controller.rb
+++ b/BrainPortal/app/controllers/noc_controller.rb
@@ -255,7 +255,7 @@ def gather_info(since_when) #:nodoc:
# Count of active statuses
info[:status_counts] = b.is_a?(BrainPortal) ? [] :
- b.cbrain_tasks.where(["updated_at > ?", since_when ])
+ b.cbrain_tasks.where(["updated_at > ? or status in (?)", since_when, CbrainTask::RUNNING_STATUS ])
.group(:status)
.count
.to_a # [ [ status, count ], [ status, count ] ... ]
diff --git a/BrainPortal/app/controllers/portal_controller.rb b/BrainPortal/app/controllers/portal_controller.rb
index 86b410585..fdb65b600 100644
--- a/BrainPortal/app/controllers/portal_controller.rb
+++ b/BrainPortal/app/controllers/portal_controller.rb
@@ -27,9 +27,9 @@ class PortalController < ApplicationController
include DateRangeRestriction
- api_available :only => [ :swagger ] # GET /swagger returns the .json specification
+ api_available :only => [ :swagger, :stats ] # GET /swagger returns the .json specification
- before_action :login_required, :except => [ :credits, :about_us, :welcome, :swagger, :available ] # welcome is here so that the redirect to the login page doesn't show the error message
+ before_action :login_required, :except => [ :credits, :about_us, :welcome, :swagger, :available, :stats ] # welcome is here so that the redirect to the login page doesn't show the error message
before_action :admin_role_required, :only => :portal_log
# Display a user's home page with information about their account.
@@ -253,7 +253,6 @@ def available #:nodoc:
.select { |t| t.tool_configs.to_a.any? { |tc|
tc.bourreau_id.present? &&
tc.bourreau_id > 0 &&
- tc.bourreau.try(:online) && # comment out to show them all
tc.version_name.present?
}
}
@@ -444,6 +443,22 @@ def swagger
end
end
+ # Return information about the usage of the platform.
+ def stats
+ @stats = RemoteResource.current_resource.meta[:stats] || {}
+ @stats_by_client = @stats[:UserAgents] || {}
+ @stats_by_contr_action = compile_total_stats(@stats)
+
+ @last_reset = (RemoteResource.current_resource.meta.md_for_key(:stats).created_at || Time.at(0)).utc.iso8601
+ @stats[:lastReset] = @last_reset
+
+ respond_to do |format|
+ format.html
+ format.xml { render :xml => @stats }
+ format.json { render :json => @stats }
+ end
+ end
+
private
def merge_vals_as_array(*sub_reports) #:nodoc:
@@ -486,4 +501,26 @@ def colorize_logs(data) #:nodoc:
data
end
+ # From the raw stats accumulated for all clients,
+ # controllers and actions, compile two other
+ # secondary stats: the sums by clients, and
+ # the sums by pair "controller,service".
+ def compile_total_stats(stats={}) #:nodoc:
+ stats_by_contr_action = {}
+
+ # stats['AllAgents'] is { 'controller' => { 'action' => [1,2] , ... }, ... }
+ all_agents = stats['AllAgents'] || stats[:AllAgents] || {}
+ all_agents.each do |controller, by_action|
+ by_action.each do |action, counts|
+ # By controller and action
+ contr_action = "#{controller},#{action}"
+ stats_by_contr_action[contr_action] ||= [0,0]
+ stats_by_contr_action[contr_action][0] += counts[0]
+ stats_by_contr_action[contr_action][1] += counts[1]
+ end
+ end
+
+ return stats_by_contr_action
+ end
+
end
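
For reference, the stats hash walked by compile_total_stats is keyed by controller then action, each leaf being an [ok, fail] pair (the same convention the removed ServiceController summed as counts[0] + counts[1]). A hypothetical input and its compiled output:

    stats = {
      'AllAgents' => {
        'userfiles' => { 'index' => [120, 3], 'show' => [40, 0] },
        'tasks'     => { 'index' => [75, 1] },
      }
    }
    compile_total_stats(stats)
    # => { "userfiles,index" => [120, 3],
    #      "userfiles,show"  => [40, 0],
    #      "tasks,index"     => [75, 1] }
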
diff --git a/BrainPortal/app/controllers/service_controller.rb b/BrainPortal/app/controllers/service_controller.rb
deleted file mode 100644
index 0269d3fa6..000000000
--- a/BrainPortal/app/controllers/service_controller.rb
+++ /dev/null
@@ -1,222 +0,0 @@
-
-#
-# CBRAIN Project
-#
-# Copyright (C) 2008-2012
-# The Royal Institution for the Advancement of Learning
-# McGill University
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# Controller for the Service.
-# Implement actions as defined by the CANARIE Web Service Monitoring API.
-#
-# By default, all these actions are commented out in the route.rb file,
-# so for most installations this controller is NOT USED at all.
-class ServiceController < ApplicationController
-
- Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
-
- api_available
-
- # Return basic identification and provenance
- # information about the platform
- def info
- @info = { :name => "CBRAIN Data Platform",
- :synopsis => <<-SYNOPSYS.strip.gsub(/\s\s+/, " "), # the ugly gsub is because of CANARIE
-
- The CBRAIN platform is a web-based, collaborative research platform
- designed to address major issues in "Big Data" research through a
- single consistent framework by providing researchers the ability
- to easily execute large-scale pipelines for computational research.
- The platform's primary objective is to provide a user-friendly,
- extensible, integrated, and robust yet lightweight collaborative
- neuroimaging research platform providing transparent access to the
- heterogeneous computing and data resources available across Canada
- and around the world.
-
- SYNOPSYS
- :version => CbrainFileRevision.cbrain_head_tag,
- :institution => "McGill University",
- :releaseTime => Time.parse(CbrainFileRevision.cbrain_head_revinfo.datetime).utc.iso8601,
- :researchSubject => "Multi-discipline",
- :supportEmail => RemoteResource.current_resource.support_email,
- :tags => [ "neurology", "CBRAIN", "data transfer", "cluster",
- "supercomputer", "task", "data modeling", "visualization",
- ],
- }
-
- respond_to do |format|
- format.html
- format.xml { render :xml => @info }
- format.json { render :json => @info }
- end
- end
-
- # Return information about the usage of the platform.
- def stats
- stats = RemoteResource.current_resource.meta[:stats] || {}
- stats_by_contr_action = compile_total_stats(stats)
-
- @summary_stats = stats['UserAgents'].dup
- @last_reset = (RemoteResource.current_resource.meta.md_for_key(:stats).try(:created_at) || Time.now).utc.iso8601
- authenticated_actions = count_authenticated_actions(stats_by_contr_action)
- @summary_stats["TotalActions"] = authenticated_actions
- @summary_stats["lastReset"] = @last_reset
-
- # CANARIE only wants TWO fields. :-(
- @json_stats = {
- "Total Actions" => authenticated_actions,
- "lastReset" => @last_reset,
- }
-
- respond_to do |format|
- format.html
- format.xml { render :xml => @summary_stats }
- format.json { render :json => @json_stats }
- end
- end
-
- # Return the online documentation.
- def doc
- redirect_to 'https://github.com/aces/cbrain/wiki'
- end
-
- # Return release note describing the current version
- # of the platform APIs.
- def releasenotes
- redirect_to 'https://github.com/aces/cbrain/blob/master/Release-Notes.md'
- end
-
- # Provides information on how to get support
- # for the platform.
- def support
- about_us_url = url_for(:controller => :portal, :action => :about_us)
- @support = { :supportEmail => RemoteResource.current_resource.support_email,
- :aboutUs => about_us_url,
- :helpUrl => RemoteResource.current_resource.help_url,
- }
-
- respond_to do |format|
- format.html
- format.xml { render :xml => @support }
- format.json { render :json => @support }
- end
- end
-
- # Return link to the source code of the platform
- def source
- redirect_to 'https://github.com/aces/cbrain'
- end
-
- # Redirects to the main login page.
- def tryme
- redirect_to '/login'
- end
-
- # Allows users to view platform's
- # licencing/usage term.
- def licence
- respond_to do |format|
- format.html { redirect_to :controller => :portal, :action => :about_us }
- format.xml { head :not_acceptable }
- format.json { head :not_acceptable }
- end
- end
-
- # Allows user to view the software provenance
- def provenance
- respond_to do |format|
- format.html { render 'portal/provenance' }
- format.xml { head :not_acceptable }
- format.json { head :not_acceptable }
- end
- end
-
- # Base information
- def factsheet
- respond_to do |format|
- format.html { redirect_to :controller => :portal, :action => :about_us }
- format.xml { head :not_acceptable }
- format.json { head :not_acceptable }
- end
- end
-
- # Return information about the usage of the platform.
- def detailed_stats
- @stats = RemoteResource.current_resource.meta[:stats] || {}
- @stats_by_client = @stats[:UserAgents] || {}
- @stats_by_contr_action = compile_total_stats(@stats)
-
- @last_reset = (RemoteResource.current_resource.meta.md_for_key(:stats).created_at || Time.at(0)).utc.iso8601
- @stats[:lastReset] = @last_reset
-
- respond_to do |format|
- format.html
- format.xml { render :xml => @stats }
- format.json { render :json => @stats }
- end
- end
-
- private
-
- # From the raw stats accumulated for all clients,
- # controllers and actions, compile two other
- # secondary stats: the sums by clients, and
- # the sums by pair "controller,service".
- def compile_total_stats(stats={}) #:nodoc:
- stats_by_contr_action = {}
-
- # stats['AllAgents'] is { 'controller' => { 'action' => [1,2] , ... }, ... }
- all_agents = stats['AllAgents'] || stats[:AllAgents] || {}
- all_agents.each do |controller, by_action|
- by_action.each do |action, counts|
- # By controller and action
- contr_action = "#{controller},#{action}"
- stats_by_contr_action[contr_action] ||= [0,0]
- stats_by_contr_action[contr_action][0] += counts[0]
- stats_by_contr_action[contr_action][1] += counts[1]
- end
- end
-
- return stats_by_contr_action
- end
-
- # Returns a count of all actions that require
- # being authenticated; there is a built-in
- # exception list to ignore actions that can
- # be invoked externally without authentication
- # (for instance, /service/* or /portal/welcome)
- # Returns the sum of successful and unsuccessful
- # actions.
- def count_authenticated_actions(stats_by_contr_action = {}) #:nodoc:
- tot = 0;
- stats_by_contr_action.keys.sort.each do |contr_action|
- next if contr_action == 'portal,welcome'
- next if contr_action == 'portal,credits'
- next if contr_action == 'portal,about_us'
- controller = contr_action.split(",").first
- next if controller == 'service' # all of them
- next if controller == 'controls' # show
- next if controller == 'sessions' # new, show, destroy, create
- next if controller == 'nh_sessions'
- next if controller == 'noc'
- counts = stats_by_contr_action[contr_action] || [0,0]
- tot += counts[0] + counts[1] # OK + FAIL
- end
- tot
- end
-
-end
diff --git a/BrainPortal/app/controllers/sessions_controller.rb b/BrainPortal/app/controllers/sessions_controller.rb
index 0b254994d..8410ca194 100644
--- a/BrainPortal/app/controllers/sessions_controller.rb
+++ b/BrainPortal/app/controllers/sessions_controller.rb
@@ -331,7 +331,7 @@ def user_tracking(portal,origin='CBRAIN') #:nodoc:
# the same information afterwards. Thus the weird style alignment.
user.addlog( "Logged in on #{portal.name}/#{origin} with #{authentication_mechanism} from #{pretty_host} using #{pretty_brow}")
portal.addlog("User #{user.login} logged in on #{origin} with #{authentication_mechanism} from #{pretty_host} using #{pretty_brow}")
- user.update_attribute(:last_connected_at, Time.now)
+ user.update_column(:last_connected_at, Time.now)
# Admin users start with some differences in behavior
if user.has_role?(:admin_user)
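
The switch from update_attribute to update_column above relies on standard Rails semantics: update_column issues a single SQL UPDATE with no callbacks and no updated_at touch, which suits a pure bookkeeping field like last_connected_at. Side by side:

    user.update_attribute(:last_connected_at, Time.now) # runs callbacks, touches updated_at
    user.update_column(:last_connected_at, Time.now)    # bare UPDATE: skips callbacks,
                                                        # validations and timestamps
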
diff --git a/BrainPortal/app/controllers/tasks_controller.rb b/BrainPortal/app/controllers/tasks_controller.rb
index 5a0fabb11..c6d6a2319 100644
--- a/BrainPortal/app/controllers/tasks_controller.rb
+++ b/BrainPortal/app/controllers/tasks_controller.rb
@@ -187,7 +187,7 @@ def new #:nodoc:
if tool_config_id # the prefered method
@tool_config = ToolConfig.find(tool_config_id)
if ! @tool_config.can_be_accessed_by?(current_user)
- raise ActiveRecord::RecordNotFound("Cannot access ToolConfig ##{tool_config_id}")
+ raise ActiveRecord::RecordNotFound.new("Cannot access ToolConfig ##{tool_config_id}")
end
else # Try to propose a version; usually that's when we get just a tool_id
if tool_id.blank?
@@ -197,7 +197,7 @@ def new #:nodoc:
end
tool = Tool.find(tool_id)
if ! tool.can_be_accessed_by?(current_user)
- raise ActiveRecord::RecordNotFound("Cannot access Tool ##{tool_id}")
+ raise ActiveRecord::RecordNotFound.new("Cannot access Tool ##{tool_id}")
end
bourreau_id = Bourreau.find_all_accessible_by_user(current_user).where(:online => true).pluck(:id) if bourreau_id.nil? # try them all
toolconfigs = ToolConfig.where(
@@ -1502,6 +1502,8 @@ def handle_preset_actions #:nodoc:
if (! preset_id.blank?) && preset = CbrainTask.where(:id => preset_id, :status => [ 'Preset', 'SitePreset' ]).first
old_params = @task.params.clone
@task.params = preset.params
+      @task.description ||= ""
+ @task.description += "\n\nDescription from preset configuration:\n\n#{preset.description}" if preset.description.present?
@task.restore_untouchable_attributes(old_params, :include_unpresetable => true)
if preset.group && preset.group.can_be_accessed_by?(current_user)
@task.group = preset.group
diff --git a/BrainPortal/app/controllers/tool_configs_controller.rb b/BrainPortal/app/controllers/tool_configs_controller.rb
index d287c23ec..d71b2f6da 100644
--- a/BrainPortal/app/controllers/tool_configs_controller.rb
+++ b/BrainPortal/app/controllers/tool_configs_controller.rb
@@ -238,9 +238,16 @@ def update #:nodoc:
respond_to do |format|
new_record = @tool_config.new_record?
- if @tool_config.save_with_logging(current_user, %w( env_array script_prologue script_epilogue ncpus extra_qsub_args
- container_image_userfile_id containerhub_image_name
- container_engine container_index_location ))
+ if @tool_config.save_with_logging(current_user,
+ %w( version_name env_array script_prologue script_epilogue ncpus extra_qsub_args
+ container_image_userfile_id containerhub_image_name
+ container_engine container_index_location container_exec_args
+ inputs_readonly
+ singularity_overlays_specs singularity_use_short_workdir
+ boutiques_descriptor_path
+ )
+ )
+
if new_record
flash[:notice] = "Tool configuration is successfully created."
else
@@ -298,6 +305,7 @@ def tool_config_params #:nodoc:
:group_id, :ncpus, :container_image_userfile_id, :containerhub_image_name, :container_index_location,
:inputs_readonly,
:container_engine, :extra_qsub_args, :singularity_overlays_specs, :container_exec_args,
+ :singularity_use_short_workdir,
:boutiques_descriptor_path,
# The configuration of a tool in a VM managed by a
# ScirCloud Bourreau is defined by the following
diff --git a/BrainPortal/app/controllers/userfiles_controller.rb b/BrainPortal/app/controllers/userfiles_controller.rb
index f9869e118..3204b36d5 100644
--- a/BrainPortal/app/controllers/userfiles_controller.rb
+++ b/BrainPortal/app/controllers/userfiles_controller.rb
@@ -239,7 +239,7 @@ def stream
# Find and validate target userfile
@userfile = Userfile.find_accessible_by_user(userfile_id, current_user, :access_requested => :read)
if @userfile.nil?
- raise ActiveRecord::RecordNotFound("Could not retrieve a userfile with ID: #{userfile_id}")
+ raise ActiveRecord::RecordNotFound.new("Could not retrieve a userfile with ID: #{userfile_id}")
end
# If it's a SingleFile
@@ -433,8 +433,16 @@ def sync_multiple #:nodoc:
SyncStatus.where(:userfile_id => @userfiles.map(&:id), :status => [ "InSync" ]).all.each do |ss|
updated += 1 if ss.status_transition(ss.status,"ProvNewer")
end
- flash[:notice] = "Marked #{updated} files as newer on their Data Provider."
- redirect_to :action => :index
+ respond_to do |format|
+ format.html do
+ flash[:notice] = "Marked #{updated} files as newer on their Data Provider."
+ redirect_to :action => :index
+ end
+ format.json do
+ render :json => { :notice => "Marked #{updated} files as newer on provider" },
+ :status => :ok
+ end
+ end
return
end
@@ -1091,7 +1099,7 @@ def change_provider #:nodoc:
next if orig_provider.id == data_provider_id # no support for copy to same provider in the interface, yet.
res = nil
if task == :move
- raise "not owner" unless u.has_owner_access?(current_user)
+ raise RuntimeError.new("Not owner") unless u.has_owner_access?(current_user)
res = u.provider_move_to_otherprovider(new_provider, :crush_destination => crush_destination)
else # task is :copy
my_group_id = current_assignable_group.id
@@ -1145,10 +1153,15 @@ def delete_files #:nodoc:
failed_list = {}
CBRAIN.spawn_with_active_records_if(! api_request?, current_user, "Delete files") do
idlist = to_delete.raw_first_column(:id).shuffle
+ reset_dpids = {}
idlist.each_with_index do |userfile_id,count|
userfile = Userfile.find(userfile_id) rescue nil # that way we instantiate one record at a time
next unless userfile # in case it was destroyed externally
Process.setproctitle "Delete ID=#{userfile.id} #{count+1}/#{idlist.size}"
+ if ! reset_dpids[userfile.data_provider_id]
+ userfile.data_provider.reset_connection if userfile.data_provider.respond_to?(:reset_connection)
+ reset_dpids[userfile.data_provider_id] = true
+ end
begin
userfile.destroy
deleted_success_list << userfile
@@ -1703,13 +1716,13 @@ def extract_from_archive(archive_file_name, file_type = nil, attributes = {}) #:
:variable_text => report
)
end
- rescue => e
- Message.send_message(current_user,
- :message_type => 'error',
- :header => "File extraction failed",
- :description => "Some errors occurred while extracting files from archive '#{archive_file_name}'",
- :variable_text => e.message
- )
+ rescue => e
+ Message.send_message(current_user,
+ :message_type => 'error',
+ :header => "File extraction failed",
+ :description => "Some errors occurred while extracting files from archive '#{archive_file_name}'",
+ :variable_text => e.message
+ )
end
# This method creates a tar file of the userfiles listed
@@ -2103,7 +2116,7 @@ def userfile_for_viewer
# Otherwise we want to view a file inside a FileCollection.
# Create a fake Userfile to pass information to the viewer
sub_file_info = @top_userfile.provider_collection_index.detect { |u| u.name == sub_file_name }
- raise ActiveRecord::RecordNotFound("Could not retrieve a file with the name #{sub_file_name} inside the FileCollection") if !sub_file_info
+ raise ActiveRecord::RecordNotFound.new("Could not retrieve a file with the name #{sub_file_name} inside the FileCollection") if !sub_file_info
# Find the class for the new userfile object that will be used for viewing
viewer_userfile_class = viewer_class_name.try(:constantize) || @top_userfile.class
diff --git a/BrainPortal/app/helpers/basic_helper.rb b/BrainPortal/app/helpers/basic_helper.rb
index a17570719..fa1f68526 100644
--- a/BrainPortal/app/helpers/basic_helper.rb
+++ b/BrainPortal/app/helpers/basic_helper.rb
@@ -80,5 +80,12 @@ def tree_view_icon(level = 0)
(' ' * 4 * (level.presence || 0) + '↳').html_safe
end
+ # Renders 1234567 as 1,234,567
+ def number_with_commas(number)
+ s = number.to_s
+ return s if s !~ /\A\d+\z/ # anything not a series of digits is just returned as is
+ s.reverse.gsub(/(\d\d\d)(?=\d)/, '\1,').reverse
+ end
+
end
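
The reverse/gsub/reverse idiom above inserts a comma after every third digit counting from the right; the (?=\d) lookahead stops a stray comma from appearing before the leading group. Worked step by step:

    '1234567'.reverse                        # => "7654321"
    '7654321'.gsub(/(\d\d\d)(?=\d)/, '\1,')  # => "765,432,1"
    '765,432,1'.reverse                      # => "1,234,567"
    number_with_commas('12.5')               # => "12.5" (non-integers pass through as is)
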
diff --git a/BrainPortal/app/helpers/disk_quotas_helper.rb b/BrainPortal/app/helpers/disk_quotas_helper.rb
new file mode 100644
index 000000000..60810c2d2
--- /dev/null
+++ b/BrainPortal/app/helpers/disk_quotas_helper.rb
@@ -0,0 +1,38 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Helper methods for Disk Quota views.
+module DiskQuotasHelper
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # Returns a DiskQuota max_bytes in pretty form: 'None allowed' in red, or '126 MB' etc (colored)
+ def pretty_quota_max_bytes(quota)
+ quota.none_allowed? ? red_if(true, 'None allowed') : colored_pretty_size(quota.max_bytes)
+ end
+
+ # Returns a DiskQuota max_files in pretty form: 'None allowed' in red, or just a number
+ def pretty_quota_max_files(quota)
+ quota.none_allowed? ? red_if(true, 'None allowed') : number_with_commas(quota.max_files)
+ end
+
+end
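
A hypothetical usage sketch for these helpers, assuming none_allowed? flags the special -1 quota values and that red_if and colored_pretty_size come from CBRAIN's existing view helpers:

    quota = DiskQuota.new(:max_bytes => -1, :max_files => -1)
    pretty_quota_max_bytes(quota) # => "None allowed", highlighted in red
    quota = DiskQuota.new(:max_bytes => 126_000_000, :max_files => 1_234_567)
    pretty_quota_max_files(quota) # => "1,234,567", via number_with_commas
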
diff --git a/BrainPortal/app/helpers/dynamic_form_helper.rb b/BrainPortal/app/helpers/dynamic_form_helper.rb
index 3aa1da1ce..238580ebe 100644
--- a/BrainPortal/app/helpers/dynamic_form_helper.rb
+++ b/BrainPortal/app/helpers/dynamic_form_helper.rb
@@ -49,15 +49,18 @@ def submit_button(value, options = {})
# Create a checkbox that will select or deselect all checkboxes on the page
# of class +checkbox_class+.
# Most +options+ are just treated as HTML attributes.
- # Except +options[:persistant_name]+; if provided,
- # an additional hidden input will be added to track
- # the state of the select_all checkbox.
+ #
+ # Except:
+ # - +options[:persistant_name]+: if provided, an additional hidden
+ # input will be added to track the state of the select_all checkbox.
def select_all_checkbox(checkbox_class, options = {})
options[:class] ||= ""
options[:class] += " select_all"
- options["data-checkbox-class"] = checkbox_class
- atts = options.reject{|x| x.to_s === "persistant_name"}.to_html_attributes
+ options["data-checkbox-class"] = checkbox_class
+ options["data-persistant-name"] = options[:persistant_name].present?
+
+ atts = options.reject { |x| x.to_s == "persistant_name" }.to_html_attributes
# Most common case just the select_all input
input = "".html_safe
@@ -66,11 +69,11 @@ def select_all_checkbox(checkbox_class, options = {})
# Add the hidden input; javascript code will update
# its value as needed.
hidden_options = {
- :name => options[:persistant_name],
+ :name => options[:persistant_name],
"data-checkbox-class" => checkbox_class,
:class => "select_all_hidden"
- }
-
+ }
+
hidden_atts = hidden_options.to_html_attributes
hidden_input = "".html_safe
inputs = "#{input} #{hidden_input}".html_safe
diff --git a/BrainPortal/app/helpers/groups_helper.rb b/BrainPortal/app/helpers/groups_helper.rb
index a53aefdd2..edaff110c 100644
--- a/BrainPortal/app/helpers/groups_helper.rb
+++ b/BrainPortal/app/helpers/groups_helper.rb
@@ -33,24 +33,15 @@ def css_group_type(group, group_user_count = nil)
return group.to_s.downcase unless group.is_a?(Group)
# SystemGroup subclasses; UserGroup => "user", EveryoneGroup => "everyone"
- return group.class.name.demodulize.match(/\A([A-Z][a-z]+)/).to_s.downcase if group.is_a?(SystemGroup)
+ return "user" if group.is_a?(UserGroup)
+ return "everyone" if group.is_a?(EveryoneGroup)
+ return "site" if group.is_a?(SiteGroup)
group_user_count ||= group.users.count
- return "invisible" if group.invisible?
return "public" if group.public?
- return "empty" if group_user_count == 0
return "shared" if group_user_count > 1
- return "personal"
+ return "private"
end
- # Produces a centered legend for every distinct group type in +groups+
- def group_legend(groups)
- return if groups.blank?
-
- center_legend(nil, groups.map { |g| css_group_type(g) }.uniq.map { |g|
- # 9675: UTF8 white circle, 9679: UTF8 black circle
- ["#{g == "all" ? "x25ef" : "x2b24"};", "#{g.titleize} Project"]
- })
- end
end
diff --git a/BrainPortal/app/helpers/select_box_helper.rb b/BrainPortal/app/helpers/select_box_helper.rb
index 63ca15f7b..bea85d59e 100644
--- a/BrainPortal/app/helpers/select_box_helper.rb
+++ b/BrainPortal/app/helpers/select_box_helper.rb
@@ -152,7 +152,7 @@ def group_select(parameter_name = "group_id", options = {}, select_tag_options =
end
# Step 3: Other project categories, in that order
- [ "Shared Work Projects", "Empty Work Projects", "Site Projects", "User Projects", "System Projects", "Invisible Projects", "Everyone Projects" ].each do |proj|
+ [ "Shared Work Projects", "Empty Work Projects", "Site Projects", "User Projects", "System Projects", "Invisible Projects", "Everyone Projects", "Public Projects" ].each do |proj|
ordered_category_grouped << [ proj, category_grouped_pairs.delete(proj) ] if category_grouped_pairs[proj]
end
diff --git a/BrainPortal/app/helpers/userfiles_helper.rb b/BrainPortal/app/helpers/userfiles_helper.rb
index 493d7eaa8..ad0eb5089 100644
--- a/BrainPortal/app/helpers/userfiles_helper.rb
+++ b/BrainPortal/app/helpers/userfiles_helper.rb
@@ -79,7 +79,7 @@ def file_link_table(previous_userfile, next_userfile, sort_index, options = {})
# Generates links to pretty file content for files inside FileCollections.
# Generates a download link if no viewer code can be found for the files.
- def data_link(file_name, userfile)
+ def data_link(file_name, userfile, replace_div_id="sub_viewer_filecollection_cbrain")
full_path_name = Pathname.new(userfile.cache_full_path.dirname + file_name)
display_name = full_path_name.basename.to_s
@@ -87,9 +87,12 @@ def data_link(file_name, userfile)
file_lstat = full_path_name.lstat # lstat doesn't follow symlinks, so we can tell if it is one
- return h(display_name) unless file_lstat.file?
+ # Return just the display name if the userfile is a FileCollection and the entry is not a regular file (e.g. a directory)
+ return h(display_name) if userfile.is_a?(FileCollection) && !file_lstat.file?
+
+ matched_class = SingleFile.descendants.unshift(SingleFile).find { |c| file_name =~ c.file_name_pattern }
+ matched_class ||= userfile.class if userfile.is_a?(SingleFile)
- matched_class = SingleFile.descendants.unshift(SingleFile).find { |c| file_name =~ c.file_name_pattern }
viewer = matched_class.class_viewers.first.partial rescue nil
if matched_class && viewer
@@ -100,7 +103,7 @@ def data_link(file_name, userfile)
:viewer => viewer,
:viewer_userfile_class => matched_class
),
- :replace => "sub_viewer_filecollection_cbrain",
+ :replace => replace_div_id,
}
) do
(""+display_name+"").html_safe
diff --git a/BrainPortal/app/models/application_record.rb b/BrainPortal/app/models/application_record.rb
index 6bf174128..7689f069e 100644
--- a/BrainPortal/app/models/application_record.rb
+++ b/BrainPortal/app/models/application_record.rb
@@ -96,6 +96,8 @@ def self.default_api_limit #:nodoc:
end
# Useful generic scopes for console users.
+ scope :uhour, -> { where [ "#{self.quoted_table_name}.updated_at >= ?", 1.hour.ago ] }
+ scope :chour, -> { where [ "#{self.quoted_table_name}.created_at >= ?", 1.hour.ago ] }
scope :utoday, -> { where [ "#{self.quoted_table_name}.updated_at >= ?", Time.now.at_beginning_of_day ] }
scope :ctoday, -> { where [ "#{self.quoted_table_name}.created_at >= ?", Time.now.at_beginning_of_day ] }
# Note: the following two scopes imply that the week starts on Monday morning
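Since these scopes are defined on ApplicationRecord, every model inherits them; a minimal console sketch (counts hypothetical):

    # In a CBRAIN Rails console; results are hypothetical.
    Userfile.uhour.count                          # files updated in the last hour
    CbrainTask.chour.where(:user_id => 1).count   # tasks created in the last hour for user 1
    Userfile.ctoday.where(:data_provider_id => 3) # scopes chain with any other filter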
diff --git a/BrainPortal/app/models/bourreau.rb b/BrainPortal/app/models/bourreau.rb
index 31622acbb..2124e9e65 100644
--- a/BrainPortal/app/models/bourreau.rb
+++ b/BrainPortal/app/models/bourreau.rb
@@ -454,7 +454,11 @@ def self.process_command_alter_tasks(command)
CBRAIN.spawn_with_active_records(:admin, "AlterTask #{newstatus}") do
+ signaled_finish = false # set to true when receiving TERM
+ Signal.trap("TERM") { signaled_finish = true }
+
taskids.shuffle.each_with_index do |task_id,count|
+ break if signaled_finish # ends the entire task ID list
Process.setproctitle "AlterTask #{newstatus} ID=#{task_id} #{count+1}/#{taskids.size}"
task = CbrainTask.where(:id => task_id, :bourreau_id => myself.id).first
next unless task # doesn't even exist? just ignore it
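The trap-then-flag pattern above is plain Ruby; a self-contained sketch of the same graceful-stop idea:

    # Self-contained illustration of the graceful-stop pattern above.
    stop_requested = false
    Signal.trap("TERM") { stop_requested = true }  # handler just flips a flag

    (1..1000).each do |i|
      break if stop_requested  # current item finished cleanly; skip the rest
      sleep 0.01               # stand-in for per-item work
    end
    puts stop_requested ? "Stopped by TERM" : "Finished normally"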
diff --git a/BrainPortal/app/models/boutiques_cluster_task.rb b/BrainPortal/app/models/boutiques_cluster_task.rb
index b8a0a9a25..be695e44e 100644
--- a/BrainPortal/app/models/boutiques_cluster_task.rb
+++ b/BrainPortal/app/models/boutiques_cluster_task.rb
@@ -137,7 +137,6 @@ def cluster_commands #:nodoc:
end
# Write down the file with the boutiques descriptor itself
- boutiques_json_basename = "boutiques.#{self.run_id}.json"
File.open(boutiques_json_basename, "w") do |fh|
cleaned_desc = descriptor.dup
cleaned_desc.delete("groups") if cleaned_desc.groups.size == 0 # bosh is picky
@@ -152,7 +151,11 @@ def cluster_commands #:nodoc:
#{boutiques_json_basename.bash_escape}
SIMULATE
simulate_com.gsub!("\n"," ")
- simulout = IO.popen(simulate_com) { |fh| fh.read }
+ begin
+ simulout = IO.popen(simulate_com) { |fh| fh.read }
+ rescue => ex
+ cb_error "The 'bosh exec simulate' command failed: #{ex.class} #{ex.message}"
+ end
simul_status = $? # a Process::Status object
if ! simul_status.success?
cb_error "The 'bosh exec simulate' command failed with return code #{simul_status.exitstatus}"
@@ -161,7 +164,9 @@ def cluster_commands #:nodoc:
commands = <<-COMMANDS
# Main tool command, generated with bosh exec simulate
#{simulout.strip}
- echo $? > #{exit_status_filename.bash_escape}
+ status=$?
+ echo $status > #{exit_status_filename.bash_escape}
+ bash -c "exit $status" # clumsy but I can't think of any better way
COMMANDS
else # exec launch mode
# The bosh launch command. This is all a single line, but broken up
@@ -171,7 +176,9 @@ def cluster_commands #:nodoc:
bosh exec launch \\
#{boutiques_json_basename.bash_escape} \\
#{self.invoke_json_basename.bash_escape}
- echo $? > #{exit_status_filename.bash_escape}
+ status=$?
+ echo $status > #{exit_status_filename.bash_escape}
+ bash -c "exit $status" # clumsy but I can't think of any better way
COMMANDS
end
commands.gsub!(/(\S) +(\S)/,'\1 \2') # make pretty
@@ -245,8 +252,13 @@ def save_results #:nodoc:
self.addlog("Attempting to save result file #{path}")
name, userfile_class = name_and_type_for_output_file(output, path)
- # Save the file (possible overwrite if race condition)
+ # Select an alternative and safe output type when guessing it produces a mismatch
+ userfile_class = SingleFile if File.file?(path) && !(userfile_class <= SingleFile)
+ userfile_class = FileCollection if File.directory?(path) && !(userfile_class <= FileCollection)
+
+ # Save the file
outfile = safe_userfile_find_or_new(userfile_class, :name => name)
+ new_out = outfile.new_record?
unless outfile.save
messages = outfile.errors.full_messages.join("; ")
@@ -257,17 +269,22 @@ def save_results #:nodoc:
end
# Transfer content to DataProvider
+ self.addlog("Created result file '#{name}' (ID #{outfile.id})") if new_out
+ self.addlog("Reused result file '#{name}' (ID #{outfile.id})") if ! new_out
+ self.addlog("Uploading content to #{outfile.data_provider.type} '#{outfile.data_provider.name}' (ID #{outfile.data_provider_id})")
outfile.cache_copy_from_local_file(path)
+ self.addlog("Content uploaded")
+
+ # Record ID of output file in task's params
params["_cbrain_output_#{output.id}"] ||= []
params["_cbrain_output_#{output.id}"] |= [ outfile.id ]
- self.addlog("Saved result file #{name}")
# Add provenance logs
all_file_input_ids = descriptor.file_inputs.map do |input|
invoke_params[input.id]
end.compact.uniq
parent_userfiles = Userfile.where(:id => all_file_input_ids).to_a
- self.addlog_to_userfiles_these_created_these(parent_userfiles, [outfile]) if parent_userfiles.present?
+ self.addlog_to_userfiles_these_created_these(parent_userfiles, [outfile], "", 2) if parent_userfiles.present?
# If there is only one input file, we move the output under it
if parent_userfiles.size == 1
@@ -295,7 +312,7 @@ def save_results #:nodoc:
self.addlog "Attempting to update input '#{userfile.name}' on DataProvider '#{userfile.data_provider.name}'"
userfile.cache_is_newer
userfile.sync_to_provider
- self.addlog_to_userfiles_processed(userfile, "(content modified in place)")
+ self.addlog_to_userfiles_processed(userfile, "(content modified in place)", 1)
end
end
@@ -323,7 +340,8 @@ def name_and_type_for_output_file(output, pathname)
desc = descriptor_for_save_results
custom = desc.custom || {} # 'custom' is not packaged as an object, just a hash
idlist = custom['cbrain:no-run-id-for-outputs'].presence # list of IDs where no run id inserted
- no_run_id = true if idlist && idlist.include?(output.id)
+ # We allow no_run_id only if the destination DP is multi-level (browse-path capable); presumably the output goes to "a/b/c/basename_without_id"
+ no_run_id = true if idlist && idlist.include?(output.id) && self.results_data_provider.has_browse_path_capabilities?
# Get basename, use it to guess the class
name = File.basename(pathname)
@@ -393,7 +411,13 @@ def boutiques_bosh_exec_mode
# Returns the basename of the JSON file
# that holds the 'invoke' structure for bosh.
def invoke_json_basename
- "invoke.#{self.run_id}.json"
+ ".invoke.#{self.run_id}.json"
+ end
+
+ # Returns the basename of the JSON file
+ # that holds the boutiques descriptor for bosh.
+ def boutiques_json_basename
+ ".boutiques.#{self.run_id}.json"
end
# Return true or false depending on if
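The status-capture idiom added to the generated bash has a Ruby-side twin in the IO.popen hunk above; a minimal sketch of running a command while keeping both its output and its exit status:

    # Minimal sketch: run a command, keep its output AND its exit status.
    out    = IO.popen("ls /nonexistent 2>&1") { |fh| fh.read }
    status = $?  # Process::Status of the popen'd command
    warn "command failed with code #{status.exitstatus}: #{out}" unless status.success?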
diff --git a/BrainPortal/app/models/boutiques_portal_task.rb b/BrainPortal/app/models/boutiques_portal_task.rb
index 4b525e653..d2bc2f900 100644
--- a/BrainPortal/app/models/boutiques_portal_task.rb
+++ b/BrainPortal/app/models/boutiques_portal_task.rb
@@ -27,7 +27,9 @@ class BoutiquesPortalTask < PortalTask
# This method returns the BoutiquesDescriptor
# directly associated with the ToolConfig for the task
def boutiques_descriptor
- self.tool_config.boutiques_descriptor
+ self.tool_config.boutiques_descriptor ||
+ self.find_compatible_placeholder_descriptor || # Workaround #1 for misconfigured portal
+ self.generate_placeholder_descriptor # Workaround #2 for misconfigured portal
end
# This method returns the same descriptor as
@@ -218,7 +220,7 @@ def after_form
# Check the content of all CbrainFileLists (cbcsv)
# ------------------------------------------------
# Get all the input cbcsv files
- cbcsvs = self.cbcsv_files
+ cbcsvs = self.cbcsv_files # [ [input, cbcsv_userfile], [input, cbcsv_userfile], ... ]
numRows = nil # Keep track of number of files per cbcsv
# Validate each cbcsv (all columns match per row, user has access to the file)
for input, cbcsv in cbcsvs
@@ -230,8 +232,8 @@ def after_form
# If the number of rows does not match, error
# We need only check this for inputs that are not "list".
if ! input.list
- currNumRows = (cbcsv.ordered_raw_ids || []).length
- numRows = numRows.nil? ? currNumRows : numRows
+ currNumRows = (cbcsv.ordered_raw_ids || []).length
+ numRows ||= currNumRows
if currNumRows != numRows
params_errors.add(invokename, " does not have the same number of files (#{currNumRows}) as in other present cbcsvs (#{numRows})")
next
@@ -263,6 +265,18 @@ def self.add_pretty_params_names(inputs)
def final_task_list #:nodoc:
descriptor = self.descriptor_for_final_task_list
self.addlog(descriptor.file_revision_info.format("%f rev. %s %a %d"))
+ valid_input_keys = descriptor.inputs.map(&:id)
+
+ # Add author(s) information
+ authors = Array(descriptor.custom['cbrain:author'])
+ authors = authors.empty? ? "No CBRAIN author information" :
+ authors.join(", ")
+ self.addlog("CBRAIN Author(s): #{authors}")
+
+ # Add information about Boutiques module
+ boutiques_module_information().each do |log_info|
+ self.addlog(log_info)
+ end
# --------------------------------------
# Special case where there is a single file input
@@ -277,27 +291,36 @@ def final_task_list #:nodoc:
if descriptor.file_inputs.size == 1
input = descriptor.file_inputs.first
- fillTask = lambda do |userfile,tsk|
- tsk.invoke_params[input.id] = userfile.id
+ fillTask = lambda do |userfile,tsk,extra_params=nil|
+ tsk.params[:interface_userfile_ids] |= [ userfile.id.to_s ]
+ tsk.invoke_params[input.id] = userfile.id
tsk.sanitize_param(input)
- tsk.description ||= ''
- tsk.description += " #{input.id}: #{userfile.name}"
+ tsk.description = "#{input.id}: #{userfile.name}\n#{tsk.description}".strip
+ tsk.invoke_params.merge!(extra_params.slice(*valid_input_keys)) if extra_params
tsk.description.strip!
tsk
end
- tasklist = self.params[:interface_userfile_ids].map do |userfile_id|
+ original_userfiles_ids = self.params[:interface_userfile_ids].dup
+ self.params[:interface_userfile_ids] = [] # zap it; we'll re-introduce each userfile.id as needed
+ tasklist = original_userfiles_ids.map do |userfile_id|
f = Userfile.find_accessible_by_user( userfile_id, self.user, :access_requested => file_access_symbol() )
# One task for that file
if (! f.is_a?( CbrainFileList ) || input.list) # in case of a list input, we *do* assign it the CbFileList
task = self.dup
fillTask.( f, task )
-
else # One task per userfile in the CbrainFileList
- ufiles = f.userfiles_accessible_by_user!( self.user, nil, nil, file_access_symbol() )
- # Skip files that are purposefully nil (e.g. given id 0 by the user)
- subtasks = ufiles.select { |u| ! u.nil? }.map { |u| fillTask.( u, self.dup ) }
+ ufiles = f.userfiles_accessible_by_user!( self.user, nil, nil, file_access_symbol() )
+ ordered_extra_params = f.is_a?(ExtendedCbrainFileList) ? f.ordered_params : []
+
+ # Fill subtasks array
+ subtasks = []
+ ufiles.each_with_index do |u, index|
+ next if u.nil?
+ subtasks << fillTask.( u, self.dup, ordered_extra_params[index])
+ end
+
subtasks # an array of tasks
end
end
@@ -311,7 +334,7 @@ def final_task_list #:nodoc:
# --------------------------------------
# Grab all the cbcsv input files
- cbcsvs = self.cbcsv_files(descriptor)
+ cbcsvs = self.cbcsv_files(descriptor) # [ [input, cbcsv_userfile], [input, cbcsv_userfile], ... ]
cbcsvs.reject! { |pair| pair[0].list } # ignore file inputs with list=true; they just get the CBCSV directly
# Default case: just return self as a single task
@@ -322,9 +345,17 @@ def final_task_list #:nodoc:
# Array with the actual userfiles corresponding to the cbcsv
mapCbcsvToUserfiles = cbcsvs.map { |f| f[1].ordered_raw_ids.map { |i| (i==0) ? nil : i } }
+ # Array with the actual extra json_params corresponding to the cbcsv
+ mapCbcsvToParams = cbcsvs.map do |f|
+ cbcsv = f[1]
+ cbcsv.is_a?(ExtendedCbrainFileList) ?
+ cbcsv.ordered_params : []
+ end
+
# Task list to fill and total number of tasks to output
- tasklist = []
- nTasks = mapCbcsvToUserfiles[0].length
+ tasklist = []
+ nTasks = mapCbcsvToUserfiles[0].length
+
# Iterate over each task that needs to be generated
for i in 0..(nTasks - 1)
# Clone this task
@@ -335,6 +366,8 @@ def final_task_list #:nodoc:
#currTask.params[:interface_userfile_ids] << mapCbcsvToUserfiles unless currId.nil?
currTask.invoke_params[cinput.id] = currId # If id = 0 or nil, currId = nil
currTask.invoke_params.delete(cinput.id) if currId.nil?
+ extra_params_from_Cbcsv = mapCbcsvToParams[j][i] || {}
+ currTask.invoke_params.merge!(extra_params_from_Cbcsv.slice(*valid_input_keys))
end
# Add the new task to our tasklist
tasklist << currTask
@@ -385,7 +418,7 @@ def isInactive(input)
(
val.nil? || # most of the time, the interface sends NO value at all, which is what we prefer
(type == 'Flag' && val == "0") || # checkboxes send their values as strings 0 and 1,
- (type == 'Flag' && val == false) # but normally they are transformed into bool in sanitize_params
+ (type == 'Flag' && val == false) # but normally they are transformed into bool in sanitize_param()
)
end
@@ -408,6 +441,7 @@ def ascertainCbcsvUserAccess(f,id)
msg2 = lambda { |e| " cbcsv accessibility error in #{f.name}! Possibly due to cbcsv malformation. (Received error: #{e.inspect})" }
errFlag = true # Whether the error checking found a problem
begin # Check that the user has access to all of the files in the cbcsv
+ f.sync_to_cache # We need the content of the cbcsv
f.userfiles_accessible_by_user!(self.user, nil, nil, file_access_symbol()) # side effect: cache entries within f
for i in f.ordered_raw_ids.select{ |r| (! r.nil?) && (r.to_s != '0') }
accessible = Userfile.find_accessible_by_user( i, self.user, :access_requested => file_access_symbol() ) rescue nil
@@ -448,6 +482,11 @@ def sanitize_param(input)
name = input.id
type = input.type.downcase.to_sym # old code convention from previous integrator
+ # For strings, we support a special list of parameters
+ # that can be empty strings.
+ descriptor = self.descriptor_for_after_form
+ empty_string_allowed = Array(descriptor.custom['cbrain:allow_empty_strings']).include?(name)
+
# Taken userfile names. An error will be raised if two input files have the
# same name.
@taken_files ||= Set.new
@@ -476,18 +515,17 @@ def sanitize_param(input)
when :string
value = value.to_s if value.is_a?(Symbol)
params_errors.add(invokename, " not a string (#{value})") unless value.is_a?(String)
- params_errors.add(invokename, " is blank") if value.blank?
+ params_errors.add(invokename, " is blank") if value.blank? && !empty_string_allowed
# The following two checks are to prevent cases when
# a string param is used as a path
params_errors.add(invokename, " cannot contain newlines") if value.to_s =~ /[\n\r]/
params_errors.add(invokename, " cannot start with this character") if value.to_s =~ /^[\.\/]+/
+ params_errors.add(invokename, " cannot move up dirs") if value.to_s.include? "/../"
# Try to match against various common representation of true and false
when :flag
- if value.is_a?(String)
- value = true if value =~ /\A(true|t|yes|y|on|1)\z/i
- value = false if value =~ /\A(false|f|no|n|off|0|)\z/i
- end
+ value = true if value.to_s =~ /\A(true|t|yes|y|on|1)\z/i
+ value = false if value.to_s =~ /\A(false|f|no|n|off|0|)\z/i
if ! [ true, false ].include?(value)
params_errors.add(invokename, ": not true or false (#{value})")
@@ -622,4 +660,118 @@ def file_access_symbol
@_file_access ||= (self.class.properties[:readonly_input_files].present? || self.tool_config.try(:inputs_readonly) ? :read : :write)
end
+ # In the case of a misconfiguration of the portal, or if the file for
+ # the Boutiques descriptor has disappeared, this method will look at
+ # other ToolConfigs associated with the tool of the task and try
+ # to find a replacement descriptor. It's not guaranteed that this
+ # descriptor is compatible with the params of the task, but it's
+ # probably good enough to show the task to the user.
+ #
+ # The task object itself will get a permanent ActiveRecord
+ # error added to its base to prevent any saving/editing/launching.
+ #
+ # See also the method generate_placeholder_descriptor which, in case
+ # we can't find a descriptor here, is called to create a fake
+ # descriptor out of thin air.
+ #
+ # Known limitations: if the original task was integrated with
+ # special modules that have custom entries that are different
+ # than the ones in the found descriptor, the callback methods
+ # setup(), before_form() etc might crash.
+ def find_compatible_placeholder_descriptor
+
+ # Build a list of tool configs to scan
+ tool_configs = self.tool.tool_configs.order("created_at desc").to_a
+
+ # Find the most recently created one (hopefully, backwards compatible)
+ compat_tool_config = tool_configs.detect { |tc| tc.boutiques_descriptor } # first one is the one we use
+ return nil if ! compat_tool_config
+
+ # Add persistent errors, to make sure the task cannot be touched.
+ if self.errors.blank?
+ cur_version = self.tool_config.version_name
+ alt_version = compat_tool_config.version_name
+ self.errors.add(:base, "The Boutiques Descriptor for version '#{cur_version}' of this task has disappeared. This is a configuration error, contact the administrators.")
+ self.errors.add(:base, "A descriptor from another version ('#{alt_version}') is currently in use to allow you to view the parameters.")
+ self.errors.add(:unsavable, ": The parameters for this task cannot be modified.") # this special error prevents the save button from working
+ end
+
+ compat_tool_config.boutiques_descriptor
+ end
+
+ # In the case of a misconfiguration of the portal, or if the file for
+ # the Boutiques descriptor has disappeared, this method will look at the
+ # current params of the task and create out of thin air a new fake
+ # descriptor for it. This allows the user to (at least) view the task
+ # in the interface.
+ #
+ # The task object itself will get a permanent ActiveRecord
+ # error added to its base to prevent any saving/editing/launching.
+ #
+ # Known limitations: if the original task was integrated with
+ # special modules that require custom entries in the descriptor,
+ # the callback methods setup(), before_form() etc might crash.
+ # That's because the descriptor generated here contains an empty
+ # "custom" entry.
+ def generate_placeholder_descriptor
+
+ # Add persistent errors, to make sure the task cannot be touched.
+ if self.errors.blank?
+ cur_version = self.tool_config.version_name
+ self.errors.add(:base, "The Boutiques Descriptor for version '#{cur_version}' of this task has disappeared. This is a configuration error, contact the administrators.")
+ self.errors.add(:base, "A replacement descriptor is currently in use to allow you to view the parameters.")
+ self.errors.add(:base, "Because the type information for the parameters is missing, they are all shown as strings.")
+ self.errors.add(:unsavable, ": The parameters for this task cannot be modified.") # this special error prevents the save button from working
+ end
+
+ # Main descriptor
+ fake = BoutiquesSupport::BoutiquesDescriptor.new(
+ :name => self.pretty_type,
+ :description => 'Missing Boutiques Descriptor Placeholder',
+ "tool-version" => self.tool_config.version_name,
+ "schema-version" => "0.5",
+ "command-line" => "false", # as in, the unix command 'false'
+ :custom => { 'cbrain:integrator_modules' => {} }, # we can't do better than that
+ )
+
+ # Create fake inputs for files
+ (self.params[:interface_userfile_ids] || []).each do |userfile_id|
+ fake.inputs << BoutiquesSupport::Input.new(
+ :id => "inputfile-#{userfile_id}",
+ :name => "inputfile-#{userfile_id}",
+ :type => 'File',
+ :description => "Fake input file",
+ )
+ end
+
+ # Create fake inputs for all other params (including those that are files)
+ (self.invoke_params || {}).keys.each do |input_id|
+ fake.inputs << BoutiquesSupport::Input.new(
+ :id => input_id,
+ :name => input_id,
+ :type => 'String',
+ :description => "Fake string input for key '#{input_id}'",
+ )
+ end
+
+ fake
+ end
+
+ private
+
+ # Prepare an array with revision information of
+ # all the Boutiques integrator modules used by the
+ # tools.
+ def boutiques_module_information #:nodoc:
+ descriptor = self.descriptor_for_final_task_list
+
+ integrator_modules = descriptor.custom['cbrain:integrator_modules'] || {}
+
+ integrator_modules.map do |module_name, _|
+ module_name = module_name.constantize
+ rev_info = module_name::Revision_info
+ rev_info.format("%f rev. %s %a %d")
+ end
+ end
+
end
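The Array() wrapper in the author-logging hunk is what lets the 'cbrain:author' custom entry be absent, a single string, or a list; standalone:

    # Standalone: Array() normalizes the three accepted shapes of 'cbrain:author'.
    [ nil, "Alice", [ "Alice", "Bob" ] ].each do |custom_entry|
      authors = Array(custom_entry)
      puts authors.empty? ? "No CBRAIN author information" : authors.join(", ")
    end
    # => No CBRAIN author information
    # => Alice
    # => Alice, Bob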
diff --git a/BrainPortal/app/models/cbrain_task.rb b/BrainPortal/app/models/cbrain_task.rb
index acf752f91..56578bc9d 100644
--- a/BrainPortal/app/models/cbrain_task.rb
+++ b/BrainPortal/app/models/cbrain_task.rb
@@ -98,10 +98,15 @@ class CbrainTask < ApplicationRecord
else
value = s
end
- where(:status => value)
+ where("cbrain_tasks.status" => value)
}
- scope :active, lambda { status( :active ) }
+ scope :active, -> { status( :active ) }
+ scope :failed, -> { status( :failed ) }
+ scope :failed_setup, -> { where( "cbrain_tasks.status" => 'Failed To Setup' ) }
+ scope :failed_cluster, -> { where( "cbrain_tasks.status" => 'Failed On Cluster' ) }
+ scope :failed_post, -> { where( "cbrain_tasks.status" => 'Failed To PostProcess' ) }
+ scope :completed, -> { where( "cbrain_tasks.status" => 'Completed' ) }
scope :real_tasks,
-> { where( "cbrain_tasks.status <> 'Preset' AND cbrain_tasks.status <> 'SitePreset'" ) }
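The new status scopes chain with the time-window scopes added in ApplicationRecord; a console sketch (counts hypothetical):

    # In a CBRAIN Rails console; a quick triage of recent tasks.
    CbrainTask.failed.count                        # anything in a 'Failed *' status
    CbrainTask.failed_setup.ctoday.count           # failed during setup, created today
    CbrainTask.completed.where(:bourreau_id => 2)  # completed tasks on one execution server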
diff --git a/BrainPortal/app/models/cluster_task.rb b/BrainPortal/app/models/cluster_task.rb
index 5ffad9202..e2623f0c2 100644
--- a/BrainPortal/app/models/cluster_task.rb
+++ b/BrainPortal/app/models/cluster_task.rb
@@ -20,12 +20,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-require 'stringio'
-require 'base64'
-require 'fileutils'
-require 'json'
-require 'json-schema'
-
#Abstract model representing a job running on a cluster. This is the core class for
#launching GridEngine/PBS/MOAB/UNIX jobs (etc) using Scir.
#
@@ -521,14 +515,14 @@ def path_is_in_workdir?(path) #:nodoc:
# and add a log entry to each userfile identifying that
# it was processed by the current task. An optional
# comment can be appended to the message.
- def addlog_to_userfiles_processed(userfiles,comment = "")
+ def addlog_to_userfiles_processed(userfiles, comment = "", caller_level=0)
userfiles = [ userfiles ] unless userfiles.is_a?(Array)
myname = self.fullname
mylink = "/tasks/#{self.id}" # can't use show_task_path() on Bourreau side
mymarkup = "[[#{myname}][#{mylink}]]"
userfiles.each do |u|
next unless u.is_a?(Userfile) && u.id
- u.addlog_context(self,"Processed by task #{mymarkup} #{comment}",3)
+ u.addlog_context(self,"Processed by task #{mymarkup} #{comment}",3+caller_level)
end
end
@@ -536,14 +530,14 @@ def addlog_to_userfiles_processed(userfiles,comment = "")
# and add a log entry to each userfile identifying that
# it was created by the current task. An optional
# comment can be appended to the message.
- def addlog_to_userfiles_created(userfiles,comment = "")
+ def addlog_to_userfiles_created(userfiles, comment = "", caller_level=0)
userfiles = [ userfiles ] unless userfiles.is_a?(Array)
myname = self.fullname
mylink = "/tasks/#{self.id}" # can't use show_task_path() on Bourreau side
mymarkup = "[[#{myname}][#{mylink}]]"
userfiles.each do |u|
next unless u.is_a?(Userfile) && u.id
- u.addlog_context(self,"Created/updated by #{mymarkup} #{comment}",3)
+ u.addlog_context(self,"Created/updated by #{mymarkup} #{comment}",3+caller_level)
end
end
@@ -552,7 +546,7 @@ def addlog_to_userfiles_created(userfiles,comment = "")
# and records for each created file what were the creators, and for
# each creator file what files were created, along with a link
# to the task itself. An optional comment can be appended to the header message.
- def addlog_to_userfiles_these_created_these(creatorlist, createdlist, comment = "")
+ def addlog_to_userfiles_these_created_these(creatorlist, createdlist, comment="", caller_level=0)
# Two lists of userfiles. Make sure their contents are OK.
creatorlist = Array(creatorlist).select { |u| u.is_a?(Userfile) && u.id }
@@ -570,9 +564,9 @@ def addlog_to_userfiles_these_created_these(creatorlist, createdlist, comment =
# Add an entry to each creator files, listing created files
creatorlist.each do |creator|
if createdlist.size == 1 # a common case; create shorter log entry then.
- creator.addlog_context(self, "Used by task #{mymarkup} to create #{createdMarkups[0]}. #{comment}", 4)
+ creator.addlog_context(self, "Used by task #{mymarkup} to create #{createdMarkups[0]}. #{comment}", 4+caller_level)
else
- creator.addlog_context(self, "Used by task #{mymarkup}, list of #{createdlist.size} created files follow. #{comment}", 4)
+ creator.addlog_context(self, "Used by task #{mymarkup}, list of #{createdlist.size} created files follow. #{comment}", 4+caller_level)
createdMarkups.each_slice(5).each do |files_4|
creator.addlog(files_4.join(", "))
end
@@ -582,9 +576,9 @@ def addlog_to_userfiles_these_created_these(creatorlist, createdlist, comment =
# Add an entry to each created files, listing creators
createdlist.each do |created|
if creatorlist.size == 1 # a common case; create shorter log entry then.
- created.addlog_context(self, "Created/updated by task #{mymarkup} from file #{creatorMarkups[0]}. #{comment}", 4)
+ created.addlog_context(self, "Created/updated by task #{mymarkup} from file #{creatorMarkups[0]}. #{comment}", 4+caller_level)
else
- created.addlog_context(self, "Created/updated by task #{mymarkup}, list of #{creatorlist.size} used files follow. #{comment}", 4)
+ created.addlog_context(self, "Created/updated by task #{mymarkup}, list of #{creatorlist.size} used files follow. #{comment}", 4+caller_level)
creatorMarkups.each_slice(5).each do |files_4|
created.addlog(files_4.join(", "))
end
@@ -624,10 +618,14 @@ def tool_config_system(command)
# Build script
script = ""
+ # Flag to guarantee propagation of env variables into the singularity/apptainer container.
+ # As far as I know this is only needed to reverse the effect of the --cleanenv option;
+ # otherwise all vars are copied into the container. More cases may yet be identified.
+ propagate = self.use_singularity?
# Add prologues in specialization order
- script += bourreau_glob_config.to_bash_prologue if bourreau_glob_config
- script += tool_glob_config.to_bash_prologue if tool_glob_config
- script += tool_config.to_bash_prologue if tool_config
+ script += bourreau_glob_config.to_bash_prologue propagate if bourreau_glob_config
+ script += tool_glob_config.to_bash_prologue propagate if tool_glob_config
+ script += tool_config.to_bash_prologue propagate if tool_config
# Add CBRAIN special inits
script += self.supplemental_cbrain_tool_config_init
# Add the command
@@ -775,11 +773,11 @@ def post_process
saveok = saveok && self.save_results
self.meta[:no_end_keyword_check] = nil
end
- self.update_size_of_cluster_workdir
if ! saveok
self.status_transition(self.status, "Failed On Cluster")
self.addlog("Data processing failed on the cluster.")
else
+ self.update_size_of_cluster_workdir # callbacks and modules might have cleaned up the files by now
self.addlog("Post processing completed.")
self.status_transition(self.status, "Completed")
end
@@ -1780,27 +1778,16 @@ def submit_cluster_job
# Joined version of all the lines in the scientific script
command_script = commands.join("\n")
- # Add HOME switching back and forth
- command_script = <<-HOME_SWITCHING
-# Preserve system HOME, then switch it to the task's workdir
-_cbrain_home_="$HOME"
-export HOME=#{self.full_cluster_workdir.bash_escape}
-
-#{command_script}
-
-# Restore system HOME (while preserving the latest exit code)
-_cbrain_status_="$?"
-export HOME="$_cbrain_home_"
-bash -c "exit $_cbrain_status_"
- HOME_SWITCHING
-
# In case of Docker or Singularity, we rewrite the scientific script inside
# yet another wrapper script.
if self.use_docker?
+ command_script = wrap_new_HOME(command_script, self.full_cluster_workdir)
command_script = self.docker_commands(command_script)
elsif self.use_singularity?
load_singularity_image
- command_script = self.singularity_commands(command_script)
+ command_script = self.singularity_commands(command_script) # note: invokes wrap_new_HOME itself
+ else
+ command_script = wrap_new_HOME(command_script, self.full_cluster_workdir)
end
# Create a bash science script out of the text
@@ -1813,9 +1800,9 @@ def submit_cluster_job
# by ClusterTask
# #{ClusterTask.revision_info.to_s}
-#{bourreau_glob_config ? bourreau_glob_config.to_bash_prologue : ""}
-#{tool_glob_config ? tool_glob_config.to_bash_prologue : ""}
-#{tool_config ? tool_config.to_bash_prologue : ""}
+#{bourreau_glob_config ? bourreau_glob_config.to_bash_prologue(self.use_singularity?) : ""}
+#{tool_glob_config ? tool_glob_config.to_bash_prologue(self.use_singularity?) : ""}
+#{tool_config ? tool_config.to_bash_prologue(self.use_singularity?) : ""}
#{self.supplemental_cbrain_tool_config_init}
# CbrainTask '#{self.name}' commands section
@@ -1862,13 +1849,38 @@ def submit_cluster_job
# Record runtime environment
bash #{Rails.root.to_s.bash_escape}/vendor/cbrain/bin/runtime_info.sh > #{runtime_info_basename}
-# stdout and stderr captured below will be re-substituted in
-# the output and error of this script.
-bash '#{sciencefile}' > #{science_stdout_basename} 2> #{science_stderr_basename} </dev/null
-echo '__CBRAIN_CAPTURE_PLACEHOLDER__'      # where stdout captured below will be substituted
-echo '__CBRAIN_CAPTURE_PLACEHOLDER__' 1>&2 # where stderr captured below will be substituted
+# With apptainer/singularity jobs, we sometimes get an error booting the container,
+# so we try up to five times.
+for singularity_attempts in 1 2 3 4 5 ; do # note: the number 5 is used a bit below in an 'if'
+ SECONDS=0 # this is a special bash variable, see the doc
+
+ # stdout and stderr captured below will be re-substituted in
+ # the output and error of this script here (this one!)
+ bash '#{sciencefile}' >> #{science_stdout_basename} 2>> #{science_stderr_basename} </dev/null
+ status=$?
+
+ # If stderr shows no sign of a container boot failure, we are done retrying
+ if ! grep -i 'FATAL.*container.*creation.*failed' < #{science_stderr_basename} > /dev/null ; then
+ break # move on, for any other error or even non zero successes
+ fi
+
+ # Detect that final attempt to boot failed
+ if test $singularity_attempts -eq 5 ; then
+ echo "Apptainer container boot attempts all failed, giving up."
+ status=99 # why not
+ break
+ fi
+
+ # Cleanup and try again
+ echo "Apptainer boot attempt number $singularity_attempts failed, trying again."
+ grep -v -i 'FATAL.*container.*creation.*failed' < #{science_stderr_basename} > #{science_stderr_basename}.clean
+ mv -f #{science_stderr_basename}.clean #{science_stderr_basename}
+done
+
+echo '__CBRAIN_CAPTURE_PLACEHOLDER__' # where stdout captured above will be substituted
+echo '__CBRAIN_CAPTURE_PLACEHOLDER__' 1>&2 # where stderr captured above will be substituted
date "+CBRAIN Task Ending With Status $status After $SECONDS seconds, at %s : %F %T"
date "+CBRAIN Task Ending With Status $status After $SECONDS seconds, at %s : %F %T" 1>&2
@@ -2305,9 +2317,7 @@ def load_docker_image_cmd #:nodoc:
# Returns true if the task's ToolConfig is configured to point to a singularity image
# for the task's processing.
def use_singularity?
- return self.tool_config.container_engine == "Singularity" &&
- ( self.tool_config.containerhub_image_name.present? ||
- self.tool_config.container_image_userfile_id.present? )
+ return self.tool_config.use_singularity?
end
# Return the 'singularity' command to be used for the task; this is fetched
@@ -2316,6 +2326,12 @@ def singularity_executable_name
return self.bourreau.singularity_executable_name.presence || "singularity"
end
+ # Returns true if the admin has configured this option in the
+ # task's ToolConfig attributes.
+ def use_singularity_short_workdir?
+ self.tool_config.singularity_use_short_workdir
+ end
+
# Returns the command line(s) associated with the task, wrapped in
# a Singularity call if a Singularity image has to be used. +command_script+
# is the raw scientific bash script.
@@ -2328,17 +2344,34 @@ def singularity_commands(command_script)
# Numbers in (paren) correspond to the comment
# block in the script, well below.
+ # (7) The path to the task's work directory
+ task_workdir = self.full_cluster_workdir # a string
+ short_workdir = "/T#{self.id}" # only used in short workdir mode
+ effect_workdir = use_singularity_short_workdir? ? short_workdir : task_workdir
+
# (1) additional singularity execution command options defined in ToolConfig
container_exec_args = self.tool_config.container_exec_args.presence
- # (2) The root of the shared area for all CBRAIN tasks
- gridshare_dir = self.bourreau.cms_shared_dir
+ # (2) The root of the DataProvider cache
+ cache_dir = self.bourreau.dp_cache_dir
- # (3) The root of the DataProvider cache
- cache_dir = self.bourreau.dp_cache_dir
+ # (3) The root of the GridShare area (all tasks workdirs)
+ gridshare_dir = self.bourreau.cms_shared_dir # not mounted explicitely
- # (6) The path to the task's work directory
- task_workdir = self.full_cluster_workdir
+ # (6) Ext3 capture mounts, if any.
+ # These will look like "-B .capt_abcd.ext3:/path/workdir/abcd:image-src=/"
+ # While we are building these options, we're also creating
+ # the ext3 filesystems at the same time, if needed.
+ esc_capture_mounts = ext3capture_basenames().inject("") do |sing_opts,(basename,size)|
+ fs_name = ".capt_#{basename}.ext3" # e.g. .capt_work.ext3
+ mountpoint = "#{effect_workdir}/#{basename}" # e.g. /path/to/workdir/work or /T123/work
+ install_ext3fs_filesystem(fs_name,size)
+ safe_mkdir(basename)
+ "#{sing_opts} -B #{fs_name.bash_escape}:#{mountpoint.bash_escape}:image-src=/"
+ end
+ # This list will be used to make a device number check: all components
+ # must be on a device different from the one for the work directory.
+ capture_basenames = ext3capture_basenames.map { |basename,_| basename }
# (4) More -B (bind mounts) for all the local data providers.
# This will be a string "-B path1 -B path2 -B path3" etc.
@@ -2359,9 +2392,28 @@ def singularity_commands(command_script)
"#{sing_opts} --overlay=#{path.bash_escape}:ro"
end
+ # Wrap new HOME environment
+ command_script = wrap_new_HOME(command_script, effect_workdir)
+
# Set singularity command
singularity_commands = <<-SINGULARITY_COMMANDS
+# Note to developers:
+# During a standard CBRAIN task, this script is invoked with no arguments
+# at all. For debugging situations, an admin can invoke it with the single
+# argument "shell" to bypass the tool's execution and launch a convenient
+# interactive shell inside the container.
+
+# These two variables control the mode switching at the end of the script.
+mode="exec"
+sing_basename=./#{singularity_wrapper_basename.bash_escape} # note: the ./ is necessary
+
+# In 'shell' mode we replace them with other things.
+if test $# -eq 1 -a "X$1" = "Xshell" ; then
+ mode="shell"
+ sing_basename=""
+fi
+
# Build a local wrapper script to run in a singularity container
cat << \"SINGULARITYJOB\" > #{singularity_wrapper_basename.bash_escape}
#!/bin/bash
@@ -2390,13 +2442,40 @@ def singularity_commands(command_script)
exit 2
fi
-
# CBRAIN internal consistency test 4: must have the gridshare_dir mounted inside the container
if test ! -d #{gridshare_dir.bash_escape} ; then
echo "Container missing mount point for gridshare directory:" #{gridshare_dir.bash_escape}
exit 2
fi
+# CBRAIN internal consistency test 5: short task workdir (optional).
+# It's possible the path below will be the same as the task workdir if no shortening
+# is configured, so the test becomes trivially like test 2.
+if test ! -d #{effect_workdir.bash_escape} ; then
+ echo "Container missing shortened task work directory:" #{effect_workdir.bash_escape}
+ exit 2
+fi
+
+# Make sure we are in the task's workdir now.
+cd #{effect_workdir.bash_escape} || exit 2
+
+# CBRAIN internal consistency test 6: all mounted ext3 filesystems should be
+# on a device different from the task's workdir. Otherwise something went
+# wrong with the mounts. Singularity or Apptainer can sometimes do that
+# if the command is improperly built (order of mounts args etc).
+workdir_devid=$(stat -c %d .) # dev number of task workdir
+for mount in #{capture_basenames.map(&:bash_escape).join(" ")} ; do
+ mnt_devid=$(stat -c %d $mount 2>/dev/null)
+ if test -z "$mnt_devid" ; then
+ echo "Container missing mount point for '$mount'."
+ exit 2
+ fi
+ if test "$workdir_devid" -eq "$mnt_devid" ; then
+ echo "Container has mount point for '$mount' but it is not mounted to an external filesystem."
+ exit 2
+ fi
+done
+
# Scientific commands start here
#{command_script}
@@ -2404,28 +2483,31 @@ def singularity_commands(command_script)
# Make sure it is executable
chmod 755 #{singularity_wrapper_basename.bash_escape}
-# Other should have executable right on all components
-# of the path in order to be mounted by singularity.
-chmod o+x . .. ../.. ../../..
# Invoke Singularity with our wrapper script above.
# Tricks used here:
# 1) we supply (if any) additional options for the exec command
-# 2) we mount the gridshare root directory
-# 3) we mount the local data provider cache root directory
-# 4) we mount each (if any) of the root directory for local data providers
-# 5) we mount (if any) file system overlays
-# 6) with -H we set the task's work directory as the singularity $HOME directory
+# 2) we mount the local data provider cache root directory
+# a) at its original cluster full path
+# b) at /DP_Cache (used only when shortening workdir)
+# 3) we mount the root of the gridshare area (for all tasks)
+# 4) we mount each (if any) of the root directories for local data providers
+# 5) we mount (if any) other fixed file system overlays
+# 6) we mount (if any) capture ext3 filesystems
+# 7) with -H we set the task's work directory as the singularity $HOME directory
#{singularity_executable_name} \\
- exec \\
+ $mode \\
#{container_exec_args} \\
- -B #{gridshare_dir.bash_escape} \\
-B #{cache_dir.bash_escape} \\
+ -B #{cache_dir.bash_escape}:/DP_Cache \\
+ -B #{gridshare_dir.bash_escape} \\
#{esc_local_dp_mountpoints} \\
#{overlay_mounts} \\
- -H #{task_workdir.bash_escape} \\
+ -B #{task_workdir.bash_escape}:#{effect_workdir.bash_escape} \\
+ #{esc_capture_mounts} \\
+ -H #{effect_workdir.bash_escape} \\
#{container_image_name.bash_escape} \\
- ./#{singularity_wrapper_basename.bash_escape}
+ $sing_basename
SINGULARITY_COMMANDS
@@ -2440,6 +2522,24 @@ def singularity_commands(command_script)
private
+ # Add HOME switching back and forth to a bash script;
+ # preserve the status returned by the script too.
+ def wrap_new_HOME(script, new_home)
+ new_home_script = <<-HOME_SWITCHING
+# Preserve system HOME, then switch it
+_cbrain_home_="$HOME"
+export HOME=#{new_home.bash_escape}
+
+#{script}
+
+# Restore system HOME (while preserving the latest exit code)
+_cbrain_status_="$?"
+export HOME="$_cbrain_home_"
+bash -c "exit $_cbrain_status_"
+ HOME_SWITCHING
+ new_home_script
+ end
+
# Returns an array of directory paths for all
# online data providers that are local to the current
# system (including smart ones). This is often needed
@@ -2455,6 +2555,43 @@ def local_dp_storage_paths #:nodoc:
dirs
end
+ # Just invokes the same method on the task's ToolConfig.
+ def ext3capture_basenames
+ self.tool_config.ext3capture_basenames
+ end
+
+ # This method creates an empty +filename+ with +size+ bytes
+ # (where size is specified like what the unix 'truncate' command accepts)
+ # and then formats it with a ext3 filesystem. If the filename already exists,
+ # nothing is done.
+ def install_ext3fs_filesystem(filename,size) #:nodoc:
+ if File.file?(filename) # already exists, all ok
+ self.addlog("EXT3 filesystem file '#{filename}' already exists")
+ return true
+ end
+
+ self.addlog("Creating EXT3 filesystem in '#{filename}' with size=#{size}")
+
+ # Create an empty file of the proper size
+ system("truncate -s #{size.bash_escape} #{filename.bash_escape}")
+ status = $? # A Process::Status object
+ if ! status.success?
+ cb_error "Cannot create EXT3 filesystem file '#{filename}': #{status.to_s}"
+ end
+
+ # Format it. Only works on linux obviously
+ system("echo y | mkfs.ext3 -t ext3 -m 0 -q -E root_owner #{filename.bash_escape}")
+ status = $? # A Process::Status object
+ if ! status.success?
+ cb_error "Cannot format EXT3 filesystem file '#{filename}': #{status.to_s}"
+ end
+
+ true
+ rescue => ex
+ File.unlink(filename) rescue nil # keep directory clean of broken ext3 file
+ raise ex
+ end
+
##################################################################
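Outside of a task, the create-then-format sequence of install_ext3fs_filesystem() can be exercised standalone; a sketch assuming Linux with truncate and mkfs.ext3 in the PATH, using Shellwords in place of CBRAIN's bash_escape:

    # Standalone sketch of the ext3 loopback-file creation above (Linux only).
    require 'shellwords'

    filename, size = "demo.ext3", "50M"
    unless File.file?(filename)
      system("truncate -s #{size.shellescape} #{filename.shellescape}")
      raise "truncate failed: #{$?}" unless $?.success?
      system("echo y | mkfs.ext3 -t ext3 -m 0 -q -E root_owner #{filename.shellescape}")
      raise "mkfs.ext3 failed: #{$?}" unless $?.success?
    end
    # Apptainer/Singularity can then bind-mount it, e.g.
    #   -B demo.ext3:/path/to/workdir/work:image-src=/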
diff --git a/BrainPortal/app/models/data_provider.rb b/BrainPortal/app/models/data_provider.rb
index 1fc072b47..96e0f22e6 100644
--- a/BrainPortal/app/models/data_provider.rb
+++ b/BrainPortal/app/models/data_provider.rb
@@ -238,6 +238,8 @@ class DataProvider < ApplicationRecord
:message => 'is invalid as only paths with simple characters are valid: a-z, A-Z, 0-9, _, +, =, . and of course /',
:allow_blank => true
+ validate :owner_is_appropriate
+
belongs_to :user
belongs_to :group
has_many :userfiles, :dependent => :restrict_with_exception
@@ -731,6 +733,9 @@ def provider_move_to_otherprovider(userfile, otherprovider, options = {})
return false unless Userfile.is_legal_filename?(new_name)
return false unless userfile.id # must be a fully saved file
+ # Check quota at destination
+ DiskQuota.exceeded!(new_user_id, otherprovider.id)
+
# Find existing destination, if any
target_exists = Userfile.where(
:name => new_name,
@@ -828,6 +833,9 @@ def provider_copy_to_otherprovider(userfile, otherprovider, options = {})
return false unless Userfile.is_legal_filename?(new_name)
return false unless userfile.id # must be a fully saved file
+ # Check quota at destination
+ DiskQuota.exceeded!(new_user_id, otherprovider.id)
+
# Find existing destination, if any
target_exists = Userfile.where(
:name => new_name,
@@ -1059,6 +1067,21 @@ def for_api
+ #################################################################
+ # Model Callbacks
+ #################################################################
+
+ # This verifies that the user_id matches an Admin user.
+ # For security reasons, no data providers should by default
+ # be owned by normal users.
+ #
+ # This method can be overridden in subclasses.
+ def owner_is_appropriate #:nodoc:
+ return true if User.where(:id => self.user_id).first.is_a?(AdminUser)
+ self.errors.add(:user_id, 'must be an administrator')
+ return false
+ end
+
#################################################################
# Class-level cache-handling methods
#################################################################
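Both new quota checks raise rather than return, so a caller that wants to report gracefully must rescue; a hedged sketch (variable names hypothetical):

    # Sketch: guard a cross-provider copy with the destination quota check.
    begin
      DiskQuota.exceeded!(userfile.user_id, destination_dp.id)  # raises when over quota
      source_dp.provider_copy_to_otherprovider(userfile, destination_dp)
    rescue CbrainDiskQuotaExceeded => ex
      warn "Not copying: #{ex.message}"  # report instead of starting a doomed copy
    end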
diff --git a/BrainPortal/app/models/disk_quota.rb b/BrainPortal/app/models/disk_quota.rb
index a29696cb8..2e31c9ace 100644
--- a/BrainPortal/app/models/disk_quota.rb
+++ b/BrainPortal/app/models/disk_quota.rb
@@ -20,7 +20,23 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-# Model representing disk quotas
+# Model representing disk quotas.
+#
+# We have two types of quota records:
+# 1) User-specific quotas on a specific DP (user_id > 0)
+# 2) DP-wide quotas applying to all users (user_id == 0)
+#
+# Quotas are verified by callbacks in the Userfile model.
+# The methods here will try to fetch and process a
+# user's specific quota (user_id and data_provider_id both set)
+# before falling back to the DP-wide quota (user_id == 0).
+#
+# The two main attributes are :max_bytes and :max_files,
+# which put limits on the sum(size) and sum(num_files)
+# of all the userfiles owned by a user on a specific DP.
+#
+# A quota record can be configured with -1,-1, which prevents
+# a user from creating any file at all on that DP.
class DiskQuota < ApplicationRecord
Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
@@ -29,6 +45,7 @@ class DiskQuota < ApplicationRecord
validates_presence_of :data_provider_id
validates_presence_of :max_bytes
validates_presence_of :max_files
+ validate :limits_are_reasonable
belongs_to :user, :optional => true # the value can be 0 but not nil
belongs_to :data_provider, :optional => false
@@ -39,11 +56,23 @@ class DiskQuota < ApplicationRecord
attr_reader :cursize, :curfiles # values are filled when performing a check
+ def is_for_dp? #:nodoc:
+ self.user_id == 0
+ end
+
+ def is_for_user? #:nodoc:
+ self.user_id != 0
+ end
+
+ def none_allowed? #:nodoc:
+ self.max_files == -1
+ end
+
# Returns true if currently, the user specified by +user_id+
# uses more disk space or more total files on +data_provider_id+ than
# the quota limit configured by the admin.
#
- # The quota record for the limites is first looked up specifically for the pair
+ # The quota record for the limits is first looked up specifically for the pair
# (user, data_provider); if no quota record is found, the pair (0, data_provider)
# will be fetched instead (meaning a default quota for all users on that DP)
#
@@ -70,7 +99,7 @@ def self.exceeded!(user_id, data_provider_id)
# Returns true if currently, the user specified by +user+ (specified by id)
# uses more disk space or more total files on than configured in the limits
# of this quota object. Since a quota object can contain '0' for the user attribute
- # (meaning it's a default for all users), a user_id musy be given explicitely
+ # (meaning it's a default for all users), a user_id must be given explicitly
# in argument in that case.
def exceeded?(user_id = self.user_id)
@@ -108,4 +137,35 @@ def exceeded!(user_id = self.user_id)
raise CbrainDiskQuotaExceeded.new(user_id, self.data_provider_id)
end
+ #####################################################
+ # Validations callbacks
+ #####################################################
+
+ # Checks that both limits have proper values.
+ # 1) Both values are >= 0 : all OK
+ # 2) max_bytes == -1 and max_files == -1 : locked quota (no other negative numbers are allowed)
+ #
+ # Note that a value of 0 will still allow a user to create ONE userfile entry,
+ # because quota failures happen only after the quota is exceeded. That's
+ # why a value of -1 exists: it prevents any files from being created.
+ #
+ # A DP-wide quota of (-1,-1) will prevent ALL users from creating files on a DP
+ # (similar to having the DP set to read-only), but you can give special privileges
+ # to individual users by creating user-specific quota records.
+ def limits_are_reasonable
+
+ # Already checked by other validate_presence callbacks
+ return false if self.max_bytes.blank? || self.max_files.blank?
+
+ # All quotas are OK with this rule
+ return true if (self.max_bytes >= 0 && self.max_files >= 0)
+ # Only -1 in both fields is allowed if using negative numbers
+ return true if (self.max_bytes == -1 && self.max_files == -1)
+
+ # Log errors
+ self.errors.add(:max_bytes, "must be -1, 0 or > 0") if self.max_bytes < -1
+ self.errors.add(:max_files, "must be -1, 0 or > 0") if self.max_files < -1
+ self.errors.add(:base, "when using -1, both limits must be set to -1") if self.max_bytes == -1 || self.max_files == -1
+ end
+
end
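A minimal sketch of the two record types described above, as created in a console (IDs hypothetical):

    # User-specific quota: user 12 may hold up to 10 GB / 10,000 files on DP 3.
    DiskQuota.create!(:user_id => 12, :data_provider_id => 3,
                      :max_bytes => 10.gigabytes, :max_files => 10_000)

    # DP-wide default (user_id == 0): lock DP 3 for everyone else with (-1,-1)...
    DiskQuota.create!(:user_id => 0, :data_provider_id => 3,
                      :max_bytes => -1, :max_files => -1)
    # ...the user-specific record above still wins for user 12.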
diff --git a/BrainPortal/app/models/remote_resource.rb b/BrainPortal/app/models/remote_resource.rb
index 0ee6efdb3..750eb45e6 100644
--- a/BrainPortal/app/models/remote_resource.rb
+++ b/BrainPortal/app/models/remote_resource.rb
@@ -58,7 +58,6 @@ class RemoteResource < ApplicationRecord
# can only be set by the admin using the Rails console; leaving them blank
# means the mailers use whatever is configured in the Rails environment config.
serialize :email_delivery_options
- serialize :nh_email_delivery_options
validates :name,
:uniqueness => true,
@@ -525,8 +524,8 @@ def get_ssh_public_key #:nodoc:
cb_error "SSH public key only accessible for the current resource." unless self.id == self.class.current_resource.id
return @ssh_public_key if @ssh_public_key
home = CBRAIN::Rails_UserHome
- if File.exists?("#{home}/.ssh/id_cbrain_portal.pub")
- @ssh_public_key = File.read("#{home}/.ssh/id_cbrain_portal.pub") rescue ""
+ if File.exists?("#{home}/.ssh/id_cbrain_ed25519.pub")
+ @ssh_public_key = File.read("#{home}/.ssh/id_cbrain_ed25519.pub") rescue ""
else
@ssh_public_key = ""
end
diff --git a/BrainPortal/app/models/s3_flat_data_provider.rb b/BrainPortal/app/models/s3_flat_data_provider.rb
index d0a297b51..dc4b42a35 100644
--- a/BrainPortal/app/models/s3_flat_data_provider.rb
+++ b/BrainPortal/app/models/s3_flat_data_provider.rb
@@ -77,6 +77,11 @@ def s3_connection
)
end
+ def reset_connection #:nodoc:
+ Aws.empty_connection_pools! rescue nil
+ @s3_connection = nil
+ end
+
def impl_is_alive? #:nodoc:
return true if s3_connection.connected?
# Try to create the bucket once
@@ -302,9 +307,11 @@ def cache_recursive_fileinfos(userfile) #:nodoc:
cache_parent = cache_fullpath.parent
parent_length = "#{cache_parent}/".length # used in substr below
glob_pattern = userfile.is_a?(FileCollection) ? "/**/*" : ""
- Dir.glob("#{userfile.cache_full_path}#{glob_pattern}").map do |fullpath| # /path/to/userfilebase/d1/d2/f1.txt
- stats = File.lstat(fullpath) # not stat() !
- relpath = fullpath[parent_length,999999] # userfilebase/d1/d2/f1.txt
+ Dir.glob("#{userfile.cache_full_path}#{glob_pattern}", File::FNM_DOTMATCH).map do |fullpath| # /path/to/userfilebase/d1/d2/f1.txt
+ next if fullpath.ends_with? "/." # skip spurious entries for self-referencing sub directories
+ next if fullpath.ends_with? "/.." # skip spurious entries for referencing parent directories (never happens?)
+ stats = File.lstat(fullpath) # not stat() !
+ relpath = fullpath[parent_length,999999] # userfilebase/d1/d2/f1.txt
# This struct is defined in DataProvider
FileInfo.new(
:name => relpath,
@@ -321,7 +328,7 @@ def cache_recursive_fileinfos(userfile) #:nodoc:
:ctime => stats.ctime,
:mtime => stats.mtime,
)
- end.compact # the compact is in case we ever insert a 'next' in the map() above
+ end.compact
end
# Scan the Amazon bucket and returns a list of FileInfo objects
@@ -390,8 +397,25 @@ def rsync_emulation(src_fileinfos,dst_fileinfos) #:nodoc:
src_idx = src_fileinfos.index_by { |fi| fi.name }
dst_idx = dst_fileinfos.index_by { |fi| fi.name }
+ # Hash of all possible directory prefixes at source
+ all_src_prefixes = src_idx
+ .keys # names of all source files and dirs
+ .map { |path| File.dirname(path) } # parents of all of them
+ .uniq
+ .map do |dirpath|
+ prefixes = [ dirpath ]
+ while (parent=File.dirname(dirpath)) != '.'
+ raise "Woh, got an absolute path back to root filesystem?!?" if parent == '/'
+ prefixes << parent
+ dirpath = parent
+ end
+ prefixes
+ end
+ .flatten
+ .index_by(&:itself) # will also do uniq
+
# Build two lists
- delete_dest = dst_fileinfos.select { |fi| ! src_idx[fi.name] }
+ delete_dest = dst_fileinfos.select { |fi| ! src_idx[fi.name] && ! all_src_prefixes[fi.name] }
delete_dest += dst_fileinfos.select { |fi| src_idx[fi.name] && src_idx[fi.name].symbolic_type != fi.symbolic_type }
add_dest = src_fileinfos.select do |src_fi|
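The prefix bookkeeping in rsync_emulation() is easy to check in isolation; a standalone run in plain Ruby (with uniq standing in for ActiveSupport's index_by):

    # Standalone check of the directory-prefix expansion used above.
    names = [ "a/b/c/f1.txt", "a/b/f2.txt", "top.txt" ]

    all_prefixes = names
      .map { |path| File.dirname(path) }  # "a/b/c", "a/b", "."
      .uniq
      .map do |dirpath|
        prefixes = [ dirpath ]
        while (parent = File.dirname(dirpath)) != '.'
          prefixes << parent
          dirpath = parent
        end
        prefixes
      end
      .flatten
      .uniq

    p all_prefixes.sort  # => [".", "a", "a/b", "a/b/c"]
    # A destination entry matching any prefix is a live directory and must not be deleted.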
diff --git a/BrainPortal/app/models/sing_bindmount_data_provider.rb b/BrainPortal/app/models/sing_bindmount_data_provider.rb
index 8799a7f8b..516c70df4 100644
--- a/BrainPortal/app/models/sing_bindmount_data_provider.rb
+++ b/BrainPortal/app/models/sing_bindmount_data_provider.rb
@@ -160,10 +160,9 @@ def impl_sync_to_cache(userfile) #:nodoc:
# As of rsync 3.1.2, rsync does the escaping of the remote path properly itself
source_escaped = remotefull.to_s.bash_escape if self.class.local_rsync_protects_args?
text = bash_this("#{rsync} -a -l --no-g --chmod=u=rwX,g=rX,Dg+s,o=r --delete #{self.rsync_excludes} #{source_colon}#{source_escaped}#{sourceslash} #{shell_escape(localfull)} 2>&1")
- text.sub!(/Warning: Permanently added[^\n]+known hosts.\s*/i,"") # a common annoying warning
- cb_error "Error syncing userfile to local cache, rsync returned:\n#{text}" unless text.blank?
+ cb_error "Error syncing userfile ##{userfile.id} to local cache, rsync returned:\n#{text}" unless text.blank?
unless File.exist?(localfull)
- cb_error "Error syncing userfile to local cache: no destination file found after rsync?\n" +
+ cb_error "Error syncing userfile ##{userfile.id} to local cache: no destination file found after rsync?\n" +
"Make sure you are running rsync 3.0.6 or greater!"
end
true
diff --git a/BrainPortal/app/models/sing_squashfs_data_provider.rb b/BrainPortal/app/models/sing_squashfs_data_provider.rb
index d41e9dce4..f2c352321 100644
--- a/BrainPortal/app/models/sing_squashfs_data_provider.rb
+++ b/BrainPortal/app/models/sing_squashfs_data_provider.rb
@@ -128,11 +128,13 @@ def impl_sync_to_cache(userfile) #:nodoc:
source_escaped = provider_is_remote ? remote_shell_escape(remotefull) : remotefull.to_s.bash_escape
# As of rsync 3.1.2, rsync does the escaping of the remote path properly itself
source_escaped = remotefull.to_s.bash_escape if self.class.local_rsync_protects_args?
+ # We need the SSH agent even when doing local transfers
+ CBRAIN.with_unlocked_agent
+
text = bash_this("#{rsync} -a -l --no-g --chmod=u=rwX,g=rX,Dg+s,o=r --delete #{self.rsync_excludes} #{source_colon}#{source_escaped}#{sourceslash} #{shell_escape(localfull)} 2>&1")
- text.sub!(/Warning: Permanently added[^\n]+known hosts.\s*/i,"") # a common annoying warning
- cb_error "Error syncing userfile to local cache, rsync returned:\n#{text}" unless text.blank?
+ cb_error "Error syncing userfile ##{userfile.id} to local cache, rsync returned:\n#{text}" unless text.blank?
unless File.exist?(localfull)
- cb_error "Error syncing userfile to local cache: no destination file found after rsync?\n" +
+ cb_error "Error syncing userfile ##{userfile.id} to local cache: no destination file found after rsync?\n" +
"Make sure you are running rsync 3.0.6 or greater!"
end
true
@@ -279,7 +281,7 @@ def get_squashfs_basenames(force = false) #:nodoc:
remote_cmd = "cd #{self.remote_dir.bash_escape} && ls -1"
text = self.remote_bash_this(remote_cmd)
lines = text.split("\n")
- @sq_files = lines.select { |l| l =~ /\A\S+\.(squashfs|sqs)\z/ }.sort
+ @sq_files = lines.select { |l| l =~ /\A\S+\.(squashfs|sqs|sqfs)\z/ }.sort
self.meta[:squashfs_basenames] = @sq_files
end
diff --git a/BrainPortal/app/models/squashifier_en_cbrain_ssh_data_provider.rb b/BrainPortal/app/models/squashifier_en_cbrain_ssh_data_provider.rb
new file mode 100644
index 000000000..26135a1c4
--- /dev/null
+++ b/BrainPortal/app/models/squashifier_en_cbrain_ssh_data_provider.rb
@@ -0,0 +1,170 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This DataProvider class implements a remote SSH-accessible data provider
+# using the EnCbrainSshDataProvider file structure, but with an added
+# functionality:
+#
+# 1) the content of any FileCollection will be locally
+# squashed with the command 'mksquashfs' before being sent to the
+# data provider side (aka synchronizing to the provider).
+#
+# 2) Conversely a FileCollection's content will be unsquashed with
+# the command 'unsquashfs' when synchronizing to the cache.
+#
+# This DP requires the Rails application to have access to these
+# two commands, of course.
+#
+# Note that this DP cannot be made into a 'Smart' version, since the
+# content on the DP side is always different from the content on the
+# cache side, even when working with both under the same host. So
+# there will always be a SSH upload and download operation whenever
+# syncing to or from the cache.
+#
+# TODO refactor to avoid code duplication. That would require creating a new
+# base-class abstract method cache_full_path_for_upload() and
+# cache_full_path_for_download() (suggested names) that DP subclasses
+# would use to implement their data transfer methods, distinct from
+# cache_full_pathname()
+class SquashifierEnCbrainSshDataProvider < EnCbrainSshDataProvider
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # Name of the squashfs file that will contain a filecollection's squashified content
+ # This is a constant name independent of what the collection's own name is.
+ SQ_BASENAME = "CBRAIN_SquashedContent.squashfs"
+
+ # This returns the category of the data provider
+ def self.pretty_category_name #:nodoc:
+ "Squashifier Enhanced SSH CBRAIN"
+ end
+
+ def impl_sync_to_cache(userfile) #:nodoc:
+ return super if userfile.is_a?(SingleFile)
+
+ # Normal code will fetch (remote) basename/basename.squashfs into (local) basename/basename.squashfs
+ super
+
+ # Then we unsquash it all
+ fullpath = cache_full_path(userfile) # path to dir; in it is the .squashfs file
+ cacheparent = fullpath.parent
+ basename = userfile.name
+ tmpdirbase = ".tmp.unsq.#{Process.pid}"
+ unsqu_out = bash_this(
+ "cd #{cacheparent.to_s.bash_escape} && " +
+ "mv #{basename.bash_escape} #{tmpdirbase} && " +
+ "unsquashfs -f -n -p 1 -no-xattrs -d #{basename.bash_escape} #{tmpdirbase.bash_escape}/#{SQ_BASENAME.bash_escape} 2>&1 1>/dev/null && " +
+ "rm -rf #{tmpdirbase}"
+ )
+ # Perform cleanup of expected messages (stupid unsquashfs program is too verbose)
+ #[
+ # # In the following regexps, the trailing .* match anything to NL (but not including it)
+ # /^Parallel unsquashfs: Using \d+ processor.*/i,
+ # /^\d+ inodes.*to write.*/i,
+ # /^created \d+.*/i,
+ #].each { |regex| unsqu_out.sub!(regex,"") }
+ unsqu_out.strip! # remove all blanks on each side, whatever's left is the culprit
+ cb_error "Error syncing userfile ##{userfile.id} to local cache, unsquashfs commands returned:\n#{unsqu_out}" unless unsqu_out.blank?
+ true
+ end
+
+ def impl_sync_to_provider(userfile) #:nodoc:
+ return super if userfile.is_a?(SingleFile)
+
+ fullpath = cache_full_path(userfile) # without SQEXT
+ cacheparent = fullpath.parent
+ basename = userfile.name
+ tmpdirbase = ".tmp.mksq.#{Process.pid}"
+ # Note about the mksquashfs command: by supplying a single source argument ('basename'), the
+ # *content* of that directory is put directly at the top of the created squashfs filesystem.
+ mem_opt = self.class.mksquashfs_has_mem_option? ? "-mem 64m" : ""
+ mksqu_out = bash_this(
+ "cd #{cacheparent.to_s.bash_escape} && " +
+ "mkdir -p #{tmpdirbase.bash_escape} && " +
+ "mksquashfs #{basename.bash_escape} #{tmpdirbase.bash_escape}/#{SQ_BASENAME.bash_escape} -processors 1 -no-progress -noappend -no-xattrs -noD -noI -noF #{mem_opt} 2>&1 1>/dev/null || echo mksquashfs command failed"
+ )
+ # Perform cleanup of expected messages (stupid mksquashfs program is too verbose)
+ #[
+ # # In the following regexps, the trailing .* match anything to NL (but not including it)
+ # /^created \d+.*/i,
+ #].each { |regex| unsqu_out.sub!(regex,"") }
+ mksqu_out.strip!
+ cb_error "Error syncing userfile ##{userfile.id} to provider, mksquashfs commands returned:\n#{mksqu_out}" unless mksqu_out.blank?
+
+ # Invoke the normal code; duplicated from superclasses unfortunately
+
+ # -------
+ # Prep code from EnCbrainSshDataProvider (simplified a little)
+ # -------
+ threelevels = cache_subdirs_from_id(userfile.id)
+ remcachedir = Pathname.new(remote_dir) + threelevels[0] + threelevels[1] + threelevels[2]
+ mkdir_command = "mkdir -p #{remcachedir.to_s.bash_escape} >/dev/null 2>&1"
+ remote_bash_this(mkdir_command)
+ # -------
+ # End of EnCbrainSshDataProvider code
+ # -------
+
+ # -------
+ # rsync upload code from SshDataProvider, adjusted
+ # -------
+ localfull = cacheparent + tmpdirbase
+ remotefull = provider_full_path(userfile)
+ cb_error "Error: directory #{localfull} does not exist in local cache!" unless File.exist?(localfull)
+
+ sourceslash = "/" # constant this time
+ rsync = rsync_over_ssh_prefix(userfile.user, userfile)
+
+ # Double escaping for old rsyncs
+ dest_escaped = remote_shell_escape(remotefull)
+ # As of rsync 3.1.2, rsync does the escaping of the remote path properly itself
+ dest_escaped = remotefull.to_s.bash_escape if self.class.local_rsync_protects_args?
+
+ # It's IMPORTANT that the destination be specified with a bare ':' in front.
+ text = bash_this("#{rsync} -a -l --no-g --chmod=u=rwX,g=rX,Dg+s,o=r --delete #{self.rsync_excludes} #{shell_escape(localfull)}#{sourceslash} :#{dest_escaped} 2>&1")
+ cb_error "Error syncing userfile ##{userfile.id} to data provider, rsync returned:\n#{text}" unless text.blank?
+ unless self.provider_file_exists?(userfile).to_s =~ /file|dir/
+ cb_error "Error syncing userfile ##{userfile.id} to data provider: no destination file found after rsync?\n" +
+ "Make sure you are running rsync 3.0.6 or greater!\n"
+ end
+ # -------
+ # End of SshDataProvider code
+ # -------
+
+ true
+ ensure
+ # Cleanup local squashfs file no matter what
+ if cacheparent.to_s.present? && tmpdirbase.present? && File.directory?("#{cacheparent.to_s.bash_escape}/#{tmpdirbase.bash_escape}")
+ system "rm -rf #{cacheparent.to_s.bash_escape}/#{tmpdirbase.bash_escape}"
+ end
+ end
+
+ # Check the capabilities of the local mksquashfs program.
+ # Returns true if it has -mem . Value cached in class variable.
+ def self.mksquashfs_has_mem_option?
+ return @_mksquashfs_mem_ if defined?(@_mksquashfs_mem_)
+ system "mksquashfs 2>&1 | grep -e -mem >/dev/null"
+ @_mksquashfs_mem_ = $?.success?
+ @_mksquashfs_mem_
+ end
+
+end
+
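For reference, here is a minimal round trip of the two shell tools this provider relies on, runnable outside CBRAIN; it assumes the mksquashfs and unsquashfs binaries are installed, and every path in it is made up:

```ruby
require 'tmpdir'

Dir.mktmpdir do |work|
  # Create a tiny collection to squash.
  src = File.join(work, "mycollection")
  Dir.mkdir(src)
  File.write(File.join(src, "hello.txt"), "hi\n")

  # Squash the *content* of the directory into one squashfs file,
  # as impl_sync_to_provider does with SQ_BASENAME.
  sqs = File.join(work, "CBRAIN_SquashedContent.squashfs")
  system("mksquashfs #{src} #{sqs} -no-progress -noappend") or raise "mksquashfs failed"

  # Unsquash it back, as impl_sync_to_cache does.
  out = File.join(work, "restored")
  system("unsquashfs -f -d #{out} #{sqs}") or raise "unsquashfs failed"
  puts File.read(File.join(out, "hello.txt"))  # => hi
end
```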
diff --git a/BrainPortal/app/models/ssh_data_provider.rb b/BrainPortal/app/models/ssh_data_provider.rb
index 60d6b0863..5bcd23252 100644
--- a/BrainPortal/app/models/ssh_data_provider.rb
+++ b/BrainPortal/app/models/ssh_data_provider.rb
@@ -87,10 +87,9 @@ def impl_sync_to_cache(userfile) #:nodoc:
# It's IMPORTANT that the source be specified with a bare ':' in front.
text = bash_this("#{rsync} -a -l --no-g --chmod=u=rwX,g=rX,Dg+s,o=r --delete #{self.rsync_excludes} :#{source_escaped}#{sourceslash} #{shell_escape(localfull)} 2>&1")
- text.sub!(/Warning: Permanently added[^\n]+known hosts.\s*/i,"") # a common annoying warning
- cb_error "Error syncing userfile to local cache, rsync returned:\n#{text}" unless text.blank?
+ cb_error "Error syncing userfile ##{userfile.id} to local cache, rsync returned:\n#{text}" unless text.blank?
unless File.exist?(localfull)
- cb_error "Error syncing userfile to local cache: no destination file found after rsync?\n" +
+ cb_error "Error syncing userfile ##{userfile.id} to local cache: no destination file found after rsync?\n" +
"Make sure you are running rsync 3.0.6 or greater!"
end
true
@@ -112,9 +111,9 @@ def impl_sync_to_provider(userfile) #:nodoc:
# It's IMPORTANT that the destination be specified with a bare ':' in front.
text = bash_this("#{rsync} -a -l --no-g --chmod=u=rwX,g=rX,Dg+s,o=r --delete #{self.rsync_excludes} #{shell_escape(localfull)}#{sourceslash} :#{dest_escaped} 2>&1")
text.sub!(/Warning: Permanently added[^\n]+known hosts.\s*/i,"") # a common annoying warning
- cb_error "Error syncing userfile to data provider, rsync returned:\n#{text}" unless text.blank?
+ cb_error "Error syncing userfile ##{userfile.id} to data provider, rsync returned:\n#{text}" unless text.blank?
unless self.provider_file_exists?(userfile).to_s =~ /file|dir/
- cb_error "Error syncing userfile to data provider: no destination file found after rsync?\n" +
+ cb_error "Error syncing userfile ##{userfile.id} to data provider: no destination file found after rsync?\n" +
"Make sure you are running rsync 3.0.6 or greater!\n"
end
true
@@ -298,8 +297,68 @@ def impl_provider_repair(issue) #:nodoc:
super(issue)
end
+ # Checks the connection and other common problems.
+ # Raises a DataProviderTestConnectionError exception if the connection is
+ # down or common configuration issues are detected. Returns true if everything is OK.
+ def check_connection!
+ err_message = self.find_connection_issues
+ raise DataProviderTestConnectionError.new(err_message) if err_message.present?
+ true
+ end
+
protected
+ # Verifies the configuration and returns a string with a descriptive
+ # error message if something is wrong.
+ def find_connection_issues
+ master = self.master # This is a handler for the connection, not persistent.
+ tmpfile = "/tmp/dp_check.#{Process.pid}.#{rand(1000000)}" # prefix for .out and .err capture files
+
+ # Check #1: the SSH connection can be established
+ if ! master.is_alive?
+ return "Cannot establish the SSH connection. Check the configuration: username, hostname, port are valid, and SSH key is installed."
+ end
+
+ # Check #2: we can run "true" on the remote site and get no output
+ status = master.remote_shell_command_reader("true",
+ :stdin => "/dev/null",
+ :stdout => "#{tmpfile}.out",
+ :stderr => "#{tmpfile}.err"
+ )
+ stdout = File.read("#{tmpfile}.out") rescue "Error capturing stdout"
+ stderr = File.read("#{tmpfile}.err") rescue "Error capturing stderr"
+ if stdout.size != 0
+ stdout.strip! if stdout.present? # just to make it pretty while still reporting whitespace-only strings
+ return "Remote shell is not clean: got some bytes on stdout: '#{stdout}'"
+ end
+ if stderr.size != 0
+ stderr.strip! if stderr.present?
+ return "Remote shell is not clean: got some bytes on stderr: '#{stderr}'"
+ end
+ if !status
+ return "Got non-zero return code when trying to run 'true' on remote side."
+ end
+
+ # Check #3: the remote directory exists
+ master.remote_shell_command_reader "test -d #{self.remote_dir.bash_escape} && echo DIR-OK", :stdout => "#{tmpfile}.out"
+ out = File.read("#{tmpfile}.out")
+ if out != "DIR-OK\n"
+ return "The remote directory doesn't seem to exist."
+ end
+
+ # Check #4: the remote directory is readable
+ master.remote_shell_command_reader "test -r #{self.remote_dir.bash_escape} && test -x #{self.remote_dir.bash_escape} && echo DIR-READ", :stdout => "#{tmpfile}.out"
+ out = File.read("#{tmpfile}.out")
+ if out != "DIR-READ\n"
+ return "The remote directory doesn't seem to be readable"
+ end
+
+ return nil # No error messages means all is OK
+ ensure
+ File.unlink("#{tmpfile}.out") rescue nil
+ File.unlink("#{tmpfile}.err") rescue nil
+ end
+
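A small usage sketch for the new check_connection! above; dp stands for any hypothetical configured SshDataProvider instance:

```ruby
begin
  dp.check_connection!                       # raises if anything is wrong
  puts "Data provider connection OK"
rescue DataProviderTestConnectionError => e
  puts "Connection problem: #{e.message}"   # message from find_connection_issues
end
```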
# Returns a list of all files in remote directory +dirname+, with all their
# associated metadata; size, permissions, access times, owner, group, etc.
def remote_dir_entries(dirname, user = nil, userfile = nil)
diff --git a/BrainPortal/app/models/ssh_data_provider_base.rb b/BrainPortal/app/models/ssh_data_provider_base.rb
index d0dc188c9..d2b002e16 100644
--- a/BrainPortal/app/models/ssh_data_provider_base.rb
+++ b/BrainPortal/app/models/ssh_data_provider_base.rb
@@ -107,6 +107,29 @@ def remote_bash_this(command, user = nil, userfile = nil)
self.master(user, userfile).remote_shell_command_reader(command, :stdin => '/dev/null') do |fh|
text = fh.read
end
+
+ filter_out_ssh_stderr_messages(text)
+ end
+
+ # Same as superclass, but because we sometimes issue local commands
+ # that indirectly connect through ssh, we also filter out the same error
+ # messages as in remote_bash_this().
+ def bash_this(command) #:nodoc:
+ text = super
+ filter_out_ssh_stderr_messages(text)
+ end
+
+ private
+
+ # Remove common warning messages generally printed on stderr...
+ def filter_out_ssh_stderr_messages(text)
+ # 1) From ssh
+ text.sub!(/^Warning: Permanently added[^\n]+known hosts.\s*/i,"") # a common annoying warning
+
+ # 2) From ssh-keysign when ssh-ing to localhost from a different GID
+ text.sub!(/^setresgid \d+: Operation not permitted\s*/i,"")
+ text.sub!(/^ssh_keysign: no reply\s*/i,"")
+ text.sub!(/^sign using hostkey.*failed\s*/i,"")
text
end
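A standalone illustration of the first substitution above, applied to a fabricated ssh warning:

```ruby
text = "Warning: Permanently added 'example.org' (ED25519) to the list of known hosts.\n" \
       "actual command output\n"
# Same regex as in filter_out_ssh_stderr_messages(); \s* also eats the newline.
text.sub!(/^Warning: Permanently added[^\n]+known hosts.\s*/i, "")
puts text   # => "actual command output"
```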
diff --git a/BrainPortal/app/models/sync_status.rb b/BrainPortal/app/models/sync_status.rb
index 643f723e6..11bedf11c 100644
--- a/BrainPortal/app/models/sync_status.rb
+++ b/BrainPortal/app/models/sync_status.rb
@@ -126,42 +126,51 @@ def self.ready_to_copy_to_cache(userfile)
unless userfile_id
return yield
end
+ prettyfile = "'#{userfile.name}' (##{userfile_id})" # for messages
state = self.get_or_create_status(userfile_id)
puts "SYNC: ToCache: #{state.pretty} Enter" if DebugMessages
- # Wait until no other local client is copying the file's content
- # in one direction or the other.
- allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
- state.reload
- state.invalidate_old_status
- puts "SYNC: ToCache: #{state.pretty} Check" if DebugMessages
- state.status !~ /^To/ # no ToProvider or ToCache
- end
- puts "SYNC: ToCache: #{state.pretty} Proceed" if DebugMessages
+ # This loop attempts to wait for and then lock out other
+ # processes on the same server.
+ 2.times do
+
+ # Wait until no other local client is copying the file's content
+ # in one direction or the other.
+ allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
+ state.reload
+ state.invalidate_old_status
+ puts "SYNC: ToCache: #{state.pretty} Check" if DebugMessages
+ state.status !~ /^To/ # no ToProvider or ToCache
+ end
+ puts "SYNC: ToCache: #{state.pretty} Proceed" if DebugMessages
- if ! allok # means timeout occurred
- oldstate = state.status
- #state.status_transition(oldstate, "ProvNewer") # do our best; not needed?
- raise "Sync error: timeout waiting for file '#{userfile_id}' " +
- "in '#{oldstate}' for operation 'ToCache'."
- end
+ if ! allok # means timeout occurred
+ oldstate = state.status
+ raise "Sync error: timeout waiting for #{prettyfile} in '#{oldstate}' for operation 'ToCache'."
+ end
- # No need to do anything if the data is already in sync!
- if state.status == "InSync"
- state.update_attributes( :accessed_at => Time.now )
- return true
- end
+ # No need to do anything if the data is already in sync!
+ if state.status == "InSync"
+ state.update_attributes( :accessed_at => Time.now )
+ return true
+ end
- if state.status == "Corrupted"
- raise "Sync error: file '#{userfile_id}' marked 'Corrupted' " +
- "for operation 'ToCache'."
- end
+ # This can be set by invalidate_old_status above
+ if state.status == "Corrupted"
+ raise "Sync error: #{prettyfile} marked 'Corrupted' for operation 'ToCache'."
+ end
+
+ # Adjust state to let all other processes know what
+ # WE want to do now. This will lock out other clients.
+ break if state.status_transition(state.status, "ToCache") # if we fail here, race condition
+
+ end # loop 2 times
- # Adjust state to let all other processes know what
- # WE want to do now. This will lock out other clients.
- state.status_transition!(state.status, "ToCache") # if we fail here, race condition
puts "SYNC: ToCache: #{state.pretty} Update" if DebugMessages
+ if state.status != 'ToCache'
+ raise "Sync error: #{prettyfile} cannot be fetched after two attempts. Status=#{state.status}"
+ end
# Wait until all other clients out there are done
# transferring content to the DP side. We don't care
@@ -175,8 +184,7 @@ def self.ready_to_copy_to_cache(userfile)
if ! allok # means timeout occurred
state.status_transition("ToCache", "ProvNewer") # checked OK
- raise "Sync error: timeout waiting for other clients for " +
- "file '#{userfile_id}' for operation 'ToCache'."
+ raise "Sync error: timeout waiting for other clients for #{prettyfile} for operation 'ToCache'."
end
# Now, perform the sync_to_cache operation.
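The two-pass pattern above replaces the old status_transition! call (which raised immediately on a race) with a non-raising compare-and-swap plus one retry. A minimal sketch of the idea, with hypothetical helpers (wait_for_idle stands in for the repeat_every_formax_untilblock loop):

```ruby
locked = false
2.times do
  wait_for_idle(state)                       # wait out other transfers (hypothetical)
  state.reload
  if state.status == "InSync"
    locked = true                            # nothing to do, data is fresh
    break
  end
  # status_transition is an atomic compare-and-swap: it returns false
  # when another process changed the status first.
  if state.status_transition(state.status, "ToCache")
    locked = true                            # we now own the lock
    break
  end
  # Lost the race: another process grabbed it first, so loop and retry once.
end
raise "Sync error: cannot lock file for ToCache" unless locked
```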
@@ -220,37 +228,46 @@ def self.ready_to_copy_to_dp(userfile)
unless userfile_id
return yield
end
+ prettyfile = "'#{userfile.name}' (##{userfile_id})" # for messages
state = self.get_or_create_status(userfile_id)
puts "SYNC: ToProv: #{state.pretty} Enter" if DebugMessages
- # Wait until no other local client is copying the file's content
- # in one direction or the other.
- allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
- state.reload
- state.invalidate_old_status
- puts "SYNC: ToProv: #{state.pretty} Check" if DebugMessages
- state.status !~ /^To/ # no ToProvider or ToCache
- end
- puts "SYNC: ToProv: #{state.pretty} Proceed" if DebugMessages
+ # This loop attempts to wait for and then lock out other
+ # processes on the same server.
+ 2.times do
+
+ # Wait until no other local client is copying the file's content
+ # in one direction or the other.
+ allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
+ state.reload
+ state.invalidate_old_status
+ puts "SYNC: ToProv: #{state.pretty} Check" if DebugMessages
+ state.status !~ /^To/ # no ToProvider or ToCache
+ end
+ puts "SYNC: ToProv: #{state.pretty} Proceed" if DebugMessages
- if ! allok # means timeout occurred
- oldstate = state.status
- #state.status_transition(oldstate, "CacheNewer") # do our best; not needed?
- raise "Sync error: timeout waiting for file '#{userfile_id}' " +
- "in '#{oldstate}' for operation 'ToProvider'."
- end
+ if ! allok # means timeout occurred
+ oldstate = state.status
+ raise "Sync error: timeout waiting for #{prettyfile} in '#{oldstate}' for operation 'ToProvider'."
+ end
- # No need to do anything if the data is already in sync!
- if state.status == "InSync"
- state.update_attributes( :accessed_at => Time.now )
- return true
- end
+ # No need to do anything if the data is already in sync!
+ if state.status == "InSync"
+ state.update_attributes( :accessed_at => Time.now )
+ return true
+ end
+
+ # Adjust state to let all other processes know what
+ # WE want to do now. This will lock out other clients.
+ break if state.status_transition(state.status, "ToProvider") # if we fail, race condition
+
+ end # loop 2 times
- # Adjust state to let all other processes know what
- # WE want to do now. This will lock out other clients.
- state.status_transition!(state.status, "ToProvider") # if we fail, race condition
puts "SYNC: ToProv: #{state.pretty} Update" if DebugMessages
+ if state.status != 'ToProvider'
+ raise "Sync error: #{prettyfile} cannot be uploaded after two attempts. Status=#{state.status}"
+ end
# Wait until all other clients out there are done
# transferring content to/from the provider, one way or the other.
@@ -263,8 +280,7 @@ def self.ready_to_copy_to_dp(userfile)
if ! allok # means timeout occurred
state.status_transition("ToProvider", "CacheNewer") # checked OK
- raise "Sync error: timeout waiting for other clients for " +
- "file '#{userfile_id}' for operation 'ToProvider'."
+ raise "Sync error: timeout waiting for other clients for #{prettyfile} for operation 'ToProvider'."
end
# Now, perform the ToProvider operation.
@@ -318,33 +334,43 @@ def self.ready_to_modify_cache(userfile, final_status = 'CacheNewer')
unless userfile_id
return yield
end
+ prettyfile = "'#{userfile.name}' (##{userfile_id})" # for messages
state = self.get_or_create_status(userfile_id)
puts "SYNC: ModCache: #{state.pretty} Enter" if DebugMessages
- # Wait until no other local client is copying the file's content
- # in one direction or the other.
- allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
- state.reload
- state.invalidate_old_status
- puts "SYNC: ModCache: #{state.pretty} Check" if DebugMessages
- state.status !~ /^To/ # no ToProvider or ToCache
- end
- puts "SYNC: ModCache: #{state.pretty} Proceed" if DebugMessages
+ # This loop attempts to wait for and then lock out other
+ # processes on the same server.
+ 2.times do
+
+ # Wait until no other local client is copying the file's content
+ # in one direction or the other.
+ allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
+ state.reload
+ state.invalidate_old_status
+ puts "SYNC: ModCache: #{state.pretty} Check" if DebugMessages
+ state.status !~ /^To/ # no ToProvider or ToCache
+ end
+ puts "SYNC: ModCache: #{state.pretty} Proceed" if DebugMessages
- if ! allok # means timeout occurred
- oldstate = state.status
- raise "Sync error: timeout waiting for file '#{userfile_id}' " +
- "in '#{oldstate}' for operation 'ModifyCache'."
- end
+ if ! allok # means timeout occurred
+ oldstate = state.status
+ raise "Sync error: timeout waiting for #{prettyfile} in '#{oldstate}' for operation 'ModifyCache'."
+ end
+
+ # Adjust state to let all other processes know that
+ # we want to modify the cache. "ToCache" is not exactly
+ # true, as we are not copying from the DP, but it will
+ # still lock out other processes trying to start data
+ # operations, which is what we want.
+ break if state.status_transition(state.status, "ToCache") # if we fail, race condition
+
+ end # loop 2 times
- # Adjust state to let all other processes know that
- # we want to modify the cache. "ToCache" is not exactly
- # true, as we are not copying from the DP, but it will
- # still lock out other processes trying to start data
- # operations, which is what we want.
- state.status_transition!(state.status, "ToCache") # if we fail, race condition
puts "SYNC: ModCache: #{state.pretty} Update" if DebugMessages
+ if state.status != 'ToCache'
+ raise "Sync error: cache for #{prettyfile} cannot be updated after two attempts. Status=#{state.status}"
+ end
# Now, perform the ModifyCache operation
self.wrap_block(
@@ -390,34 +416,43 @@ def self.ready_to_modify_dp(userfile)
unless userfile_id
return yield
end
+ prettyfile = "'#{userfile.name}' (##{userfile_id})" # for messages
state = self.get_or_create_status(userfile_id)
puts "SYNC: ModProv: #{state.pretty} Entering" if DebugMessages
- # Wait until no other local client is copying the file's content
- # in one direction or the other.
- allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
- state.reload
- state.invalidate_old_status
- puts "SYNC: ModProv: #{state.pretty} Check" if DebugMessages
- state.status !~ /^To/ # no ToProvider or ToCache
- end
- puts "SYNC: ModProv: #{state.pretty} Proceed" if DebugMessages
+ # This loop attempts to wait for and then lock out other
+ # processes on the same server.
+ 2.times do
+
+ # Wait until no other local client is copying the file's content
+ # in one direction or the other.
+ allok = repeat_every_formax_untilblock(CheckInterval,CheckMaxWait) do
+ state.reload
+ state.invalidate_old_status
+ puts "SYNC: ModProv: #{state.pretty} Check" if DebugMessages
+ state.status !~ /^To/ # no ToProvider or ToCache
+ end
+ puts "SYNC: ModProv: #{state.pretty} Proceed" if DebugMessages
- if ! allok # means timeout occurred
- oldstate = state.status
- #state.status_transition(oldstate, "CacheNewer") # do our best; not needed?
- raise "Sync error: timeout waiting for file '#{userfile_id}' " +
- "in '#{oldstate}' for operation 'ModifyProvider'."
- end
+ if ! allok # means timeout occurred
+ oldstate = state.status
+ raise "Sync error: timeout waiting for #{prettyfile} in '#{oldstate}' for operation 'ModifyProvider'."
+ end
+
+ # Adjust state to let all other processes know that
+ # we want to modify the provider side. "ToProvider" is not
+ # exactly true, as we are not copying to the DP, but it will
+ # still lock out other processes trying to start data
+ # operations, which is what we want.
+ break if state.status_transition(state.status, "ToProvider") # if we fail, race condition
+
+ end # loop 2 times
- # Adjust state to let all other processes know that
- # we want to modify the provider side. "ToProvider" is not
- # exactly true, as we are not copying to the DP, but it will
- # still lock out other processes trying to start data
- # operations, which is what we want.
- state.status_transition!(state.status, "ToProvider") # if we fail, race condition
puts "SYNC: ModProv: #{state.pretty} Update" if DebugMessages
+ if state.status != 'ToProvider'
+ raise "Sync error: provider content for #{prettyfile} cannot be modified after two attempts. Status=#{state.status}"
+ end
# Wait until all other clients out there are done
# transferring content to/from the provider, one way or the other.
@@ -429,8 +464,7 @@ def self.ready_to_modify_dp(userfile)
end
if ! allok # means timeout occurred
- raise "Sync error: timeout waiting for other clients for " +
- "file '#{userfile_id}' for operation 'ModifyProvider'."
+ raise "Sync error: timeout waiting for other clients for #{prettyfile} for operation 'ModifyProvider'."
end
# Now, perform the ModifyProvider operation.
@@ -479,11 +513,13 @@ def self.ready_to_modify_dp(userfile)
def invalidate_old_status
# "InSync" state is too old for current RemoteResource
- myself = RemoteResource.current_resource
- expire = myself.cache_trust_expire # in seconds before now
- expire = nil if expire && expire < 3600 # we don't accept thresholds less than one hour
- expire = 2.years.to_i if expire && expire > 2.years.to_i
- if expire and self.status == "InSync" && self.synced_at < Time.now - expire
+ if @expire.nil? # this value is global for the current APP (Bourreau or Portal)
+ myself = RemoteResource.current_resource
+ @expire = myself.cache_trust_expire || 0 # in seconds before now
+ @expire = 0 if @expire < 3600 # we don't accept thresholds less than one hour
+ @expire = 2.years.to_i if @expire > 2.years.to_i
+ end
+ if @expire > 0 and self.status == "InSync" && self.synced_at < Time.now - @expire
puts "SYNC: Invalid: #{self.pretty} InSync Is Too Old" if DebugMessages
self.status_transition(self.status, "ProvNewer")
return
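A reduced sketch of the clamping logic that the memoized @expire now caches once per process; it assumes ActiveSupport for 2.years (as in the Rails app itself), and the example values are made up:

```ruby
require 'active_support/core_ext/integer/time' # for 2.years

def expire_window(cache_trust_expire)
  exp = cache_trust_expire || 0             # in seconds before now
  exp = 0            if exp < 3600          # reject thresholds under one hour
  exp = 2.years.to_i if exp > 2.years.to_i  # cap at two years
  exp
end

expire_window(nil)     # => 0 (never expire)
expire_window(600)     # => 0 (too small, ignored)
expire_window(86_400)  # => 86400 (one day)
```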
@@ -504,7 +540,7 @@ def pretty #:nodoc:
end
# This method changes the status attribute
- # in the current task object to +to_state+ but
+ # in the current sync_status object to +to_state+ but
# also makes sure the current value is +from_state+ .
# The change is performed in a transaction where
# the record is locked, to ensure the transition is
diff --git a/BrainPortal/app/models/tool_config.rb b/BrainPortal/app/models/tool_config.rb
index 8d4229f16..8f85347cc 100644
--- a/BrainPortal/app/models/tool_config.rb
+++ b/BrainPortal/app/models/tool_config.rb
@@ -67,6 +67,21 @@ class ToolConfig < ApplicationRecord
api_attr_visible :version_name, :description, :tool_id, :bourreau_id, :group_id, :ncpus
+ # Given an array of variable/value pairs, builds an export script; an optional prefix is prepended to the variable names
+ def vars_to_export_script(varprefix="")
+ env = self.env_array || []
+ commands = ""
+ env.each do |name_val|
+ name = name_val[0]
+ val = name_val[1]
+ name.strip!
+ #val.gsub!(/'/,"'\''")
+ commands += "export #{varprefix}#{name}=\"#{val}\"\n"
+ end
+ commands += "\n" if env.size > 0
+ commands
+ end
+
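A re-implementation sketch showing the shape of the output of vars_to_export_script, for a made-up env_array:

```ruby
env       = [ ["FOO", "bar"], ["NCPUS", "4"] ]  # made-up env_array
varprefix = "SINGULARITYENV_"
commands  = env.map { |name, val| %Q{export #{varprefix}#{name.strip}="#{val}"\n} }.join
print commands
# export SINGULARITYENV_FOO="bar"
# export SINGULARITYENV_NCPUS="4"
```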
# To make it somewhat compatible with the ResourceAccess module,
# here's this model's own method for checking if it's visible to a user.
def can_be_accessed_by?(user)
@@ -154,8 +169,10 @@ def extended_environment
# Generates a partial BASH script that initializes environment
# variables and is followed by the script prologue stored in the
- # object.
- def to_bash_prologue
+ # object. For singularity prologues, special prefixes are added to
+ # variable names to ensure they will be propagated to the container
+ # even in the presence of --cleanenv parameters and such
+ def to_bash_prologue(singularity=false)
tool = self.tool
bourreau = self.bourreau
group = self.group
@@ -194,17 +211,25 @@ def to_bash_prologue
#---------------------------------------------------
ENV_HEADER
- env.each do |name_val|
- name = name_val[0]
- val = name_val[1]
- name.strip!
- #val.gsub!(/'/,"'\''")
- script += "export #{name}=\"#{val}\"\n"
+ script += vars_to_export_script
+
+ if singularity
+ script += <<-ENV_HEADER
+#---------------------------------------------------
+# Ensuring that environment variables are propagated:#{env.size == 0 ? " (NONE DEFINED)" : ""}
+#---------------------------------------------------
+
+ ENV_HEADER
+ script += vars_to_export_script("SINGULARITYENV_")
+ script += vars_to_export_script("APPTAINERENV_") # SINGULARITYENV is to be depricated
+
end
script += "\n" if env.size > 0
prologue = self.script_prologue || ""
- script += <<-SCRIPT_HEADER
+ script += <<-SCRIPT_HEADER
+
+
#---------------------------------------------------
# Script Prologue:#{prologue.blank? ? " (NONE SUPPLIED)" : ""}
#---------------------------------------------------
@@ -291,6 +316,13 @@ def is_at_least_version(version)
end
end
+ # true if singularity image is defined
+ def use_singularity?
+ return self.container_engine == "Singularity" &&
+ ( self.containerhub_image_name.present? ||
+ self.container_image_userfile_id.present? )
+ end
+
# This method calls any custom compare_versions() method defined
# in the CbrainTask subclass for the tool of the current tool_config.
# Returns true if the version_name of the current tool_config
@@ -333,6 +365,8 @@ def cbrain_task_class
# dp:1234
# # CBRAIN db registered file
# userfile:1234
+ # # An ext3 capture filesystem; it will NOT be returned here as an overlay
+ # ext3capture:basename=12G
def singularity_overlays_full_paths
specs = parsed_overlay_specs
specs.map do |knd, id_or_name|
@@ -356,6 +390,8 @@ def singularity_overlays_full_paths
cb_error "Userfile with id '#{id_or_name}' for overlay fetching not found." if ! userfile
userfile.sync_to_cache() rescue cb_error "Userfile with id '#{id_or_name}' for fetching overlay failed to synchronize."
userfile.cache_full_path()
+ when 'ext3capture'
+ [] # flatten will remove all that
else
cb_error "Invalid '#{knd}:#{id_or_name}' overlay."
end
@@ -374,6 +410,16 @@ def data_providers_with_overlays
end.compact
end
+ # Returns pairs [ [ basename, size ], ... ] as in [ [ 'work', '28g' ] ]
+ def ext3capture_basenames
+ specs = parsed_overlay_specs
+ return [] if specs.empty?
+ specs
+ .map { |pair| pair[1] if pair[0] == 'ext3capture' }
+ .compact
+ .map { |basename_and_size| basename_and_size.split("=") }
+ end
+
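A standalone sketch of the parsing performed by ext3capture_basenames, with a made-up overlay spec list:

```ruby
specs = [ [ "file",        "/my/over.squashfs" ],
          [ "ext3capture", "work=30G"          ],
          [ "ext3capture", "tool_1.1.2=15M"    ] ]

basenames = specs
  .map  { |kind, rest| rest if kind == 'ext3capture' }  # keep ext3capture specs only
  .compact
  .map  { |basename_and_size| basename_and_size.split("=") }
# => [["work", "30G"], ["tool_1.1.2", "15M"]]
```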
#################################################################
# Validation methods
#################################################################
@@ -429,6 +475,9 @@ def parsed_overlay_specs
# userfile:333
# dp:123
# dp:dp_name
+ # ext3capture:basename=SIZE
+ # ext3capture:work=30G
+ # ext3capture:tool_1.1.2=15M
#
def validate_overlays_specs #:nodoc:
specs = parsed_overlay_specs
@@ -444,9 +493,9 @@ def validate_overlays_specs #:nodoc:
case kind # different validations rules for file, userfile and dp specs
when 'file', 'old style file' # full path specification for a local file, e.g. "file:/myfiles/c.sqs"
- if id_or_name !~ /^\/\S+\.(sqs|squashfs)$/i
+ if id_or_name !~ /^\/\S+\.(sqs|sqfs|squashfs)$/
self.errors.add(:singularity_overlays_specs,
- " contains invalid #{kind} named '#{id_or_name}'. It should be a full path that ends in .squashfs or .sqs")
+ " contains invalid #{kind} named '#{id_or_name}'. It should be a full path that ends in .squashfs, .sqs or .sqfs")
end
when 'userfile' # db-registered file spec, e.g. "userfile:42"
@@ -461,9 +510,9 @@ def validate_overlays_specs #:nodoc:
self.errors.add(:singularity_overlays_specs,
%{" contains invalid userfile id '#{id_or_name}'. The file with id '#{id_or_name}' is not found."}
)
- elsif ! userfile.name.end_with?('.sqs') && ! userfile.name.end_with?('.squashfs')
+ elsif userfile.name.to_s !~ /\.(sqs|sqfs|squashfs)$/
self.errors.add(:singularity_overlays_specs,
- " contains invalid userfile with id '#{id_or_name}' and name '#{userfile.name}'. File name should end in .squashfs or .sqs")
+ " contains invalid userfile with id '#{id_or_name}' and name '#{userfile.name}'. File name should end in .squashfs, .sqs or .sqfs")
# todo maybe or/and check file type?
end
@@ -475,6 +524,12 @@ def validate_overlays_specs #:nodoc:
self.errors.add(:singularity_overlays_specs, "DataProvider '#{id_or_name}' is not a SingSquashfsDataProvider")
end
+ when 'ext3capture' # ext3 filesystem as a basename with an initial size
+ # The basename is limited to word characters (letters, digits, underscore), periods and dashes; the =SIZE suffix must end with G or M
+ if id_or_name !~ /\A\w[\w\.-]+=([1-9]\d*)[mg]\z/i
+ self.errors.add(:singularity_overlays_specs, "contains invalid ext3capture specification (must be like ext3capture:basename=1g or 2m etc)")
+ end
+
else
# Other errors
self.errors.add(:singularity_overlays_specs, "contains invalid specification '#{kind}:#{id_or_name}'")
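A quick sanity check of the ext3capture validation regex introduced above (expected match results shown in comments):

```ruby
re = /\A\w[\w\.-]+=([1-9]\d*)[mg]\z/i
"work=30G"        =~ re   # => 0   (valid)
"tool_1.1.2=15M"  =~ re   # => 0   (valid)
"bad name=3G"     =~ re   # => nil (spaces not allowed)
"work=0G"         =~ re   # => nil (size must start with a nonzero digit)
```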
@@ -573,10 +628,12 @@ def self.create_from_descriptor(bourreau, tool, descriptor, record_path=false)
container_info = descriptor.container_image || {}
container_engine = container_info['type'].presence.try(:capitalize)
- container_engine = "Singularity" if (container_engine == "Docker" &&
+ container_engine = "Singularity" if (container_engine == "Docker" &&
!bourreau.docker_present? &&
bourreau.singularity_present?
)
+ container_index = container_info['index'].presence
+ container_index = 'docker://' if container_index == 'index.docker.io' # old convention
tc = ToolConfig.create!(
# Main three keys
:tool_id => tool.id,
@@ -593,7 +650,7 @@ def self.create_from_descriptor(bourreau, tool, descriptor, record_path=false)
:boutiques_descriptor_path => (record_path.presence && descriptor.from_file),
# The following three attributes are for containerization; not sure about values
:container_engine => container_engine,
- :container_index_location => container_info['index'].presence,
+ :container_index_location => container_index,
:containerhub_image_name => container_info['image'].presence,
)
diff --git a/BrainPortal/app/models/user.rb b/BrainPortal/app/models/user.rb
index c06063e17..9b966bb6e 100644
--- a/BrainPortal/app/models/user.rb
+++ b/BrainPortal/app/models/user.rb
@@ -382,7 +382,7 @@ def destroy_user_sessions
# If option +create_it+ is true, create the key files if necessary.
# If option +ok_no_files+ is true, will return the object even if
# the key files don't exist yet (default it to raise an exception)
- def ssh_key(options = { :create_id => false, :ok_no_files => false })
+ def ssh_key(options = { :create_it => false, :ok_no_files => false })
name = "u#{self.id}" # Avoiding username in ssh filenames or in comment.
return SshKey.find_or_create(name) if options[:create_it]
return SshKey.new(name) if options[:ok_no_files]
diff --git a/BrainPortal/app/models/userfile.rb b/BrainPortal/app/models/userfile.rb
index a15d5e2ac..0c04fea29 100644
--- a/BrainPortal/app/models/userfile.rb
+++ b/BrainPortal/app/models/userfile.rb
@@ -649,11 +649,11 @@ def available?
# local userfile, and get to the cache quickly if it's already there.
#
# The +attributes+ describes a userfile's attribute for tracking
- # the data; normally only the 'name' is required is should be specific
+ # the data. Normally only the 'name' is required and it should be specific
# enough to represent a particular piece of data (e.g. a container
# image name with full version in it). A block must be given to the
- # method, and it will be invoked if the data is not already cached;
- # it will receive a single argument, the path to the caching subsystem
+ # method, and it will be invoked if the data is not already cached.
+ # The block will receive a single argument, the path to the caching subsystem
# where the userfile's data should be installed (the path is the same
# as that returned by 'DataProvider#cache_full_path()'. The block is
# expected to fill this path with appropriate files and/or directories.
@@ -1155,7 +1155,7 @@ def validate_browse_path
# This method is invoked before the creation of any file.
# It will raise an CbrainDiskQuotaExceeded exception if the
- # user has exceeded a quota (space, or number of files) for the DP.
+ # user has already exceeded a quota (space, or number of files) for the DP.
def check_exceeded_quota!
DiskQuota.exceeded!(self.user_id, self.data_provider_id)
true
diff --git a/BrainPortal/app/models/userkey_flat_dir_ssh_data_provider.rb b/BrainPortal/app/models/userkey_flat_dir_ssh_data_provider.rb
index 54c56863f..8cf27ec66 100644
--- a/BrainPortal/app/models/userkey_flat_dir_ssh_data_provider.rb
+++ b/BrainPortal/app/models/userkey_flat_dir_ssh_data_provider.rb
@@ -88,5 +88,15 @@ def ssh_shared_options(user = nil, userfile = nil) #:nodoc:
self.master(user, userfile).ssh_shared_options
end
+ #################################################################
+ # Model Callbacks
+ #################################################################
+
+ # Normally, DPs can only be owned by admins. However, this DP class
+ # is meant to be owned by normal users.
+ def owner_is_appropriate #:nodoc:
+ return true
+ end
+
end
diff --git a/BrainPortal/app/models/work_group.rb b/BrainPortal/app/models/work_group.rb
index c7f1851fe..fde5cf535 100644
--- a/BrainPortal/app/models/work_group.rb
+++ b/BrainPortal/app/models/work_group.rb
@@ -31,7 +31,7 @@ class WorkGroup < Group
# This method optimizes the DB lookups necessary to
# create the pretty_category_name of a set of WorkGroups
def self.prepare_pretty_category_names(groups = [], as_user = nil)
- wgs = Array(groups).select { |g| g.is_a?(WorkGroup) && !g.invisible? }
+ wgs = Array(groups).select { |g| g.is_a?(WorkGroup) && !g.invisible? && !g.public? }
wg_ids = wgs.map(&:id)
wg_ucnt = WorkGroup.joins("LEFT JOIN groups_users ON groups_users.group_id=groups.id LEFT JOIN users ON users.id=groups_users.user_id").where('groups.id' => wg_ids).group('groups.id').count('users.login') # how many users per workgroup
diff --git a/BrainPortal/app/views/access_profiles/show.html.erb b/BrainPortal/app/views/access_profiles/show.html.erb
index 7ce15ff9f..52bd6966b 100644
--- a/BrainPortal/app/views/access_profiles/show.html.erb
+++ b/BrainPortal/app/views/access_profiles/show.html.erb
@@ -65,7 +65,7 @@
<%= show_table(@access_profile, :form_helper => cf, :edit_condition => check_role(:admin_user), :header => @access_profile.new_record? ? 'Project Membership' : 'Projects In This Profile') do |t| %>
<% group_names = (@access_profile.groups.sort_by(&:name).map { |g| link_to_group_if_accessible(g) }.join(", ").html_safe.presence) || "(None)" %>
- <% t.edit_cell(:group_ids, :show_width => 2, :no_header => "Projects", :content => group_names) do %>
+ <% t.edit_cell(:group_ids, :show_width => 2, :no_header => "Projects", :td_options => { :class => "wrap" }, :content => group_names) do %>
<%= render :partial => 'shared/group_tables', :locals => { :model => @access_profile } %>
diff --git a/BrainPortal/app/views/bourreaux/_notes.html.erb b/BrainPortal/app/views/bourreaux/_notes.html.erb
index 023251f1c..7511958df 100644
--- a/BrainPortal/app/views/bourreaux/_notes.html.erb
+++ b/BrainPortal/app/views/bourreaux/_notes.html.erb
@@ -18,7 +18,7 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
+# along with this program. If not, see .
#
-%>
@@ -46,89 +46,5 @@
are several ways that the BrainPortal can be told how to connect to
and manage the Execution Server, which explains all the fields in this form.
-
-
Hardcoded DB and ActiveResource
-
-
- In the most ordinary case, an Execution Server is installed and deployed
- manually by a CBRAIN administrator; this requires choosing the
- listen port (the port argument to its script/server
- -p port command when it's started) and creating its
- database.yml file. For such a manual installation, the
- only two required field in this form will be those in the box
- ActiveResource Configuration. Note that these
- two fields are completely ignored if the ActiveResource connection
- is being tunnelled (see below).
-
-
-
Hardcoded, But With Remote Control
-
-
- If the fields in the section SSH Remote Control
- Configuration are filled and describe the deployment
- remote account of the Execution Server, then the BrainPortal will have the
- ability to establish a persistent SSH master connection to that
- host and allow the administrator to start and stop the Server
- right from the WEB interface. Note that the UNIX user under which
- the BrainPortal runs must have the proper SSH public keys for that
- remote account installed.
-
-
-
Remote Control And Optional Tunnels
-
-
- It's possible the further deviate from the standard hardcoded
- Execution Server by having the BrainPortal set up SSH tunnels for
- either (or both) of the ActiveResource connection and the
- Database Server connection. This can be turned on by entering
- port numbers in the two fields in the section Tunnelling
- Configuration. For this to work, the SSH Remote
- Control Configuration must be operational (see above). The
- port numbers specified here must both be public TCP port that are
- free on the Execution Server's host; they can be arbitrary numbers between
- 1024 and 65530, not used by other services.
-
-
-
- Tunnelling the ActiveResource connection: Entering
- a port number here will tell the BrainPortal to select this
- port for the Execution Server's Rails listening port (the port
- argument to its script/server -p port command when it's
- started). When starting the Execution Server using the interface, a SSH
- tunnel will be established between a local port (not shown here
- anywhere) on the BrainPortal side and the listening HTTP port
- of the Server selected here. This means that the two fields in
- the form's section ActiveResource Configuration
- will be completely ignored.
-
-
-
- Tunnelling the DB server connection: Entering a port
- number here will tell the BrainPortal to select this port for the
- Execution Server's Rails DB connection; more accurately, by doing this, the
- BrainPortal will create from scratch a new database.yml
- file, send it over to the Execution Server side, and force it to use
- it. This database.yml will tell the Server to connect
- to host 127.0.0.1 on the port number specified here. This port
- will be configured to tunnel back to the BrainPortal all the way
- to the DB server used by the BrainPortal itself. Note that this
- can only work if the BrainPortal is connecting to its DB via a IP
- domain connection, not a file socket. For MySQL for instance,
- it means the BrainPortal's database.yml file must NOT
- have the value localhost for the Host
- field. Also, it's likely that you'll need to comment out the entry
- "bind-address" in the server's my.cnf file.
-
-
-
- A side effect of tunnelling the DB server is that any
- database.yml file already present on the Execution Server side will
- be backed up, and also that the newly created database.yml
- file will be DELETED after the Server is started. This provides a
- high security context for the DB server's connection as if there
- were no database.yml file at all. And if there were,
- and tunnelling is disabled by blanking out the port field, then
- the backup database.yml will be restored.
-
diff --git a/BrainPortal/app/views/data_providers/_data_providers_table.html.erb b/BrainPortal/app/views/data_providers/_data_providers_table.html.erb
index b9e168029..aa253354f 100644
--- a/BrainPortal/app/views/data_providers/_data_providers_table.html.erb
+++ b/BrainPortal/app/views/data_providers/_data_providers_table.html.erb
@@ -3,7 +3,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -30,10 +30,14 @@
<% if check_role(:site_manager) || check_role(:admin_user) %>
- <%= link_to "Create New Data Provider", new_data_provider_path, :class => "button menu_button" %>
+ <%= link_to "Create New System Data Provider", new_data_provider_path, :class => "button menu_button" %>
<% end %>
- <%= link_to "Check All", nil, {:class => "button check_all_dp",} %>
+ <%= link_to "Create Personal Data Provider", new_personal_data_providers_path, :class => "button menu_button" %>
+
+ <% if check_role(:admin_user) %>
+ <%= link_to "Check All", nil, {:class => "button check_all_dp",} %>
+ <% end %>
<%= link_to "User Access Report",
{:controller => :data_providers, :action => :dp_access},
diff --git a/BrainPortal/app/views/data_providers/_show_user_key.html.erb b/BrainPortal/app/views/data_providers/_show_user_key.html.erb
new file mode 100644
index 000000000..a0fb0e184
--- /dev/null
+++ b/BrainPortal/app/views/data_providers/_show_user_key.html.erb
@@ -0,0 +1,63 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+-%>
+
+
+
+
SSH Key Configuration Instructions
+
+
+ The box below shows your personal, public SSH key that the CBRAIN system will use
+ to connect to your Data Provider. It is one line of text that needs to be installed
+ in your home directory on the remote host you configured for your Data Provider.
+
+ For experts: if you are already familiar with this type of setup, you can simply cut-and-paste the
+ key with a text editor in the file .ssh/authorized_keys on the remote system.
+ Make sure permissions on the folder .ssh are 'rwx------' and on the file authorized_keys are 'rw-------'.
+
+ For newcomers: consider downloading the key using the link below and saving it as a file 'mykey.pub' on
+ your current computer (or any computer). Then in a bash shell, run the 'ssh-copy-id' command as
+ explained below and it will automatically connect to the remote host
+ and install the key for you. The full command is:
+ Note that revealing this key's content to other people causes no security risks.
+ The information in this key is meant to be public and people cannot use it to access your information.
+
+
+
Your Personal CBRAIN Public SSH Key
+
+
<%= pretty_ssh_key @provider.user.ssh_key(create_it: true).public_key rescue "Error fetching public key" %>
+
diff --git a/BrainPortal/app/views/data_providers/new_personal.html.erb b/BrainPortal/app/views/data_providers/new_personal.html.erb
new file mode 100644
index 000000000..56eab5b18
--- /dev/null
+++ b/BrainPortal/app/views/data_providers/new_personal.html.erb
@@ -0,0 +1,100 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+-%>
+
+<% title 'Add New Personal Data Provider' %>
+
+
This will control which users within CBRAIN can view and access the files on your storage. The default and recommended project is your own private project, '<%= current_user.own_group.name %>'.
+
+
+
+
+
+
+
+
+
+ <%= render :partial => 'show_user_key' %>
+
+
+
+ <%= submit_tag 'Create New Data Provider' %>
+
+<% end %>
+
diff --git a/BrainPortal/app/views/data_providers/show.html.erb b/BrainPortal/app/views/data_providers/show.html.erb
index c215d7c83..fd5de1b3d 100644
--- a/BrainPortal/app/views/data_providers/show.html.erb
+++ b/BrainPortal/app/views/data_providers/show.html.erb
@@ -3,7 +3,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -24,6 +24,10 @@
<% title 'Data Provider Info' %>
+<% has_owner_access = (check_role(:admin_user) || @provider.user_id == current_user.id) %>
+<% is_userkey_dp = @provider.is_a?(UserkeyFlatDirSshDataProvider) %>
+<% needs_ssh_config = @provider.is_a?(SshDataProvider) || @provider.is_a?(SmartDataProviderInterface) %>
+
<% end %>
diff --git a/BrainPortal/app/views/disk_quotas/report.html.erb b/BrainPortal/app/views/disk_quotas/report.html.erb
new file mode 100644
index 000000000..04f34612c
--- /dev/null
+++ b/BrainPortal/app/views/disk_quotas/report.html.erb
@@ -0,0 +1,86 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+-%>
+
+<% title 'Exceeded Quotas' %>
+
+
+
diff --git a/BrainPortal/app/views/disk_quotas/show.html.erb b/BrainPortal/app/views/disk_quotas/show.html.erb
index d462ba6d2..fcb218ac2 100644
--- a/BrainPortal/app/views/disk_quotas/show.html.erb
+++ b/BrainPortal/app/views/disk_quotas/show.html.erb
@@ -49,33 +49,41 @@
<% t.cell("User") do %>
<% if @disk_quota.new_record? %>
- <%= user_select("disk_quota[user_id]", { :selector => (@disk_quota.user_id.presence || ""), :include_blank => '(Default For All Users)' }) %>
+ <%= user_select("disk_quota[user_id]", { :selector => @disk_quota.user_id, :include_blank => '(Default For All Users)' }) %>
<% else %>
- <%= @disk_quota.user_id.zero? ?
+ <%= @disk_quota.is_for_dp? ?
html_colorize("(Default for all users)", 'orange') :
link_to_user_if_accessible(@disk_quota.user) %>
<% end %>
<% end %>
- <% t.edit_cell(:max_bytes, :content => colored_pretty_size(@disk_quota.max_bytes), :header => "Max Disk Space") do |f| %>
+ <% t.edit_cell(:max_bytes, :header => "Max Disk Space", :content => pretty_quota_max_bytes(@disk_quota)) do |f| %>
<%= f.text_field :max_bytes, :size => 12 %>
Sizes are in bytes; when entering a new value,
you can use a unit as a suffix, such as in 2.3 kb and 10 G.
+ A value of -1 means no files allowed at all.
<% end %>
<% t.cell("Data Provider") do %>
<% if @disk_quota.new_record? %>
- <%= data_provider_select("disk_quota[data_provider_id]", { :selector => (@disk_quota.data_provider_id.presence || ""), :include_blank => '(Select a DataProvider)' }) %>
+ <%= data_provider_select("disk_quota[data_provider_id]", { :selector => @disk_quota.data_provider_id, :include_blank => '(Select a DataProvider)' }) %>
<% else %>
<%= link_to_data_provider_if_accessible(@disk_quota.data_provider) %>
<% end %>
<% end %>
- <% t.edit_cell(:max_files, :header => "Max Num Files") do |f| %>
+ <% t.edit_cell(:max_files, :header => "Max Number Of Files", :content => pretty_quota_max_files(@disk_quota)) do |f| %>
<%= f.text_field :max_files, :size => 12 %>
+
-
-
- <% if g.users.include?(current_user) && user_count > 1 %>
- Users: <%= user_count %>
- <% elsif css_type != "user" && user_count == 1 %>
- <% # FIXME can be rather slow, as it forces a query for each group with just one user %>
- User: <%= g.users.first.name %>
- <% end %>
-
- Files: <%= @group_id_2_userfile_counts[g.id].to_i %>
- Tasks: <%= @group_id_2_task_counts[g.id].to_i %>
- <% if desc_first %>
-
"}.join.html_safe
- }
- }
- end
-
-
-
- t.column("Name", :name,
- :sortable => true
- ) { |g| link_to_group_if_accessible(g) }
-
- t.column("Description", :description,
- :sortable => true,
- ) { |g| overlay_description(g.description) }
-
- t.column("Project Type", :type,
- :sortable => true,
- :filters => default_filters_for(@base_scope, :type)
- ) { |g| g.pretty_category_name(current_user) }
-
- t.column("Site", :site,
- :sortable => true,
- :filters => default_filters_for(@base_scope, Site)
- ) { |g| link_to_site_if_accessible(g.site) }
-
- t.column("Creator", :creator_id,
- :sortable => true,
- :filters => scoped_filters_for(
- @base_scope, @view_scope, :creator_id,
- scope: @scope,
- label: 'users.login',
- association: [User, 'id', 'creator_id']
- )
- ) { |g| link_to_user_if_accessible(g.creator) }
-
- t.column("Users", :users) do |g|
- @group_id_2_user_counts[g.id].to_s.presence || html_colorize("(none)", "red")
- end
- t.column("Files", :files) do |g|
- index_count_filter @group_id_2_userfile_counts[g.id], :userfiles, { :group_id => g.id }, :show_zeros => true
- end
- t.column("Tasks", :tasks) do |g|
- index_count_filter @group_id_2_task_counts[g.id], :tasks, { :group_id => g.id }, :show_zeros => true
- end
-
- if current_user.has_role?(:admin_user)
- t.column("Tools", :tools) do |g|
- index_count_filter @group_id_2_tool_counts[g.id], :tools, { :group_id => g.id }
- end
- t.column("Data Providers", :data_providers) do |g|
- index_count_filter @group_id_2_data_provider_counts[g.id], :data_providers, { :group_id => g.id }
- end
- t.column("Portal", :portals) do |g|
- index_count_filter @group_id_2_brain_portal_counts[g.id], :bourreaux, { :group_id => g.id, :type => "BrainPortal" }
- end
- t.column("Execution", :bourreaux) do |g|
- index_count_filter @group_id_2_bourreau_counts[g.id], :bourreaux, { :group_id => g.id, :type => "Bourreau" }
- end
- end
-
- t.column("Switch", :switch) do |g|
- link_to 'Switch', { :action => :switch, :id => g.id },
- :class => 'action_link',
- :method => :post
- end
- end
- %>
+ <%= render(:partial => 'view_list') %>
<% end %>
+
diff --git a/BrainPortal/app/views/groups/_view_buttons.html.erb b/BrainPortal/app/views/groups/_view_buttons.html.erb
new file mode 100644
index 000000000..2beaebed6
--- /dev/null
+++ b/BrainPortal/app/views/groups/_view_buttons.html.erb
@@ -0,0 +1,192 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+-%>
+
+
+  A project is a way to group together, under a single name, a set
+ of CBRAIN files and tasks. A project is not a folder.
+ Switching to a project makes it the 'active' project. When a
+ project is active, an automatic implicit filter will be applied
+ such that only files and tasks assigned to the project are shown
+ in the file or task manager.
+
+ These projects are visible only to you. There is one particular
+ project named <%= current_user.own_group.name %> that is created
+  by the system for you by default, and cannot be deleted. Any of
+  these projects can be turned into a Shared Project by inviting other
+  users to join. When this happens, the project will appear in a separate tab.
+
+ These are projects that you created and are shared with other users.
+ Files and tasks assigned to a project are visible to all users of that project.
+
+ <%= render :partial => 'view_buttons_tab', :locals => { :tab_groups => my_shared_groups } %>
+ <% end %>
+
+ <%= other_users_shared_groups.presence && tb.tab("Projects Shared With Me") do %>
+
+ These are projects created by other users who have invited you to join them.
+ Files and tasks assigned to a project are visible to all users of that project.
+
+ This special ALL Project is in fact no
+ project at all. Selecting this as your currently active
+  'project' will simply disable all project-based filtering.
+  The file manager and task manager will both show you a new
+  column where you'll be able to filter by project directly.
+  Selecting the ALL Project is useful
+ when you need to manage or browse files and tasks that are
+ in multiple projects.
+
+  Projects that for some reason are not assigned to other tabs
+ <%= render :partial => 'view_buttons_tab', :locals => { :tab_groups => other_groups } %>
+ <% end %>
+
+ <% end %>
+
diff --git a/BrainPortal/app/views/groups/_view_buttons_tab.html.erb b/BrainPortal/app/views/groups/_view_buttons_tab.html.erb
new file mode 100644
index 000000000..8ccad01ad
--- /dev/null
+++ b/BrainPortal/app/views/groups/_view_buttons_tab.html.erb
@@ -0,0 +1,99 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+-%>
+
+<%
+# This partial must receive tab_groups as an array of Groups (with or without the single special string 'ALL')
+%>
+
+ <% current_user_id = current_user.id %>
+
+
+ <% css_small = "small" if @scope.custom[:small_buttons] %>
+ <% tab_groups.each do |g| %>
+
+ <% if g == "ALL" %>
+
diff --git a/BrainPortal/app/views/groups/_view_list.html.erb b/BrainPortal/app/views/groups/_view_list.html.erb
new file mode 100644
index 000000000..2deb8ac09
--- /dev/null
+++ b/BrainPortal/app/views/groups/_view_list.html.erb
@@ -0,0 +1,112 @@
+
+<%-
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+-%>
+
+  <% @groups = @groups.to_a.unshift('ALL') %>
+ <%=
+ dynamic_scoped_table(@groups,
+ :id => 'groups_table',
+ :class => [ :resource_list ],
+ :scope => @scope,
+ :order_map => {
+ :site => { :a => 'sites.name', :j => Site },
+ :creator => { :a => 'users.login', :j => User }
+ },
+ :filter_map => {
+ :site => { :a => 'site_id' },
+ :creator => { :a => 'creator_id' }
+ }
+ ) do |t|
+ t.paginate
+
+ t.row do |g|
+ next unless (g == "ALL")
+ switch = link_to 'Switch', { :action => :switch, :id => "all"},
+ :class => 'action_link',
+ :method => :post
+ row_content =
+ {:name => "All",
+ :description => "Represents all the projects",
+ :type => "All Projects",
+ :site => "",
+ :creator_id => "",
+ :users => "",
+ :files => @group_id_2_userfile_counts[nil] || "(None)",
+ :tasks => @group_id_2_task_counts[nil] || "(None)",
+ :switch => switch,
+ }
+ {
+ :override => lambda { |g,r,t|
+ row_content.map { |k,v|
+ "
+ Make the project public, so that all users can access the files. Be careful with this option! You can always make the project public later on:
+ <%= f.check_box :public %>
+
<% end %>
diff --git a/BrainPortal/app/views/portal/available.html.erb b/BrainPortal/app/views/portal/available.html.erb
index 47bd8d3cc..ef97708ed 100644
--- a/BrainPortal/app/views/portal/available.html.erb
+++ b/BrainPortal/app/views/portal/available.html.erb
@@ -30,13 +30,18 @@ Not all of them are available to all users.
diff --git a/BrainPortal/app/views/service/stats.html.erb b/BrainPortal/app/views/service/stats.html.erb
deleted file mode 100644
index 9d61fb511..000000000
--- a/BrainPortal/app/views/service/stats.html.erb
+++ /dev/null
@@ -1,57 +0,0 @@
-
-<%-
-#
-# CBRAIN Project
-#
-# Copyright (C) 2008-2012
-# The Royal Institution for the Advancement of Learning
-# McGill University
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-#
--%>
-
-<% title 'Service Stats' %>
-
-
-
- For a more detailed report, see the <%= link_to "Detailed Service Stats", :action => :detailed_stats %> page.
diff --git a/BrainPortal/app/views/tasks/edit.html.erb b/BrainPortal/app/views/tasks/edit.html.erb
index 3240b7955..7e34ecc80 100644
--- a/BrainPortal/app/views/tasks/edit.html.erb
+++ b/BrainPortal/app/views/tasks/edit.html.erb
@@ -54,7 +54,7 @@
-<%= submit_tag "Save modified parameters for this task", :class => "button" %>
+<%= submit_tag "Save modified parameters for this task", :class => "button", :disabled => @task.errors[:unsavable].present? %>
<% end %>
diff --git a/BrainPortal/app/views/tasks/new.html.erb b/BrainPortal/app/views/tasks/new.html.erb
index 4a1d7af76..8e33a5017 100644
--- a/BrainPortal/app/views/tasks/new.html.erb
+++ b/BrainPortal/app/views/tasks/new.html.erb
@@ -53,7 +53,7 @@
<% unless @task.class.properties[:no_submit_button] %>
- <%= submit_tag "Start #{@task.pretty_name}", :class => "button" %>
+ <%= submit_tag "Start #{@task.pretty_name}", :class => "button", :disabled => @task.errors[:unsavable].present? %>
<% end %>
diff --git a/BrainPortal/app/views/tasks/show.html.erb b/BrainPortal/app/views/tasks/show.html.erb
index 1f216818e..e08d691ac 100644
--- a/BrainPortal/app/views/tasks/show.html.erb
+++ b/BrainPortal/app/views/tasks/show.html.erb
@@ -150,7 +150,7 @@
<% if @task.zenodo_doi.starts_with?( ZenodoHelper::ZenodoSandboxDOIPrefix ) %>
Published: <%= link_to_deposit(@task.zenodo_deposit_id) %>
<% else %>
- Published: <%= link_to_doi(@task.zenodo_doi) %>
+ Published: <%= link_to_zenodo_doi(@task.zenodo_doi) %>
<% end %>
<% elsif @task.zenodo_deposit_id.present? %>
In progress: <%= link_to_deposit(@task.zenodo_deposit_id) %>
diff --git a/BrainPortal/app/views/tool_configs/_by_resource.erb b/BrainPortal/app/views/tool_configs/_by_resource.erb
index 174b780fb..fad1f0d52 100644
--- a/BrainPortal/app/views/tool_configs/_by_resource.erb
+++ b/BrainPortal/app/views/tool_configs/_by_resource.erb
@@ -56,9 +56,9 @@
no_version_summary = ""
if versus_without_tcs.present?
if for_tool
- no_version_summary << html_colorize("No versions configured for these tools: ")
- else
no_version_summary << html_colorize("No versions configured for these servers: ")
+ else
+ no_version_summary << html_colorize("No versions configured for these tools: ")
end
versus_without_tcs.each do |vs|
diff --git a/BrainPortal/app/views/tool_configs/_form_fields.html.erb b/BrainPortal/app/views/tool_configs/_form_fields.html.erb
index 24a3abb41..fb166cb6a 100644
--- a/BrainPortal/app/views/tool_configs/_form_fields.html.erb
+++ b/BrainPortal/app/views/tool_configs/_form_fields.html.erb
@@ -210,7 +210,7 @@
for now.
<% end %>
-
+
<% t.edit_cell :containerhub_image_name, :content => link_to_userfile_if_accessible(@tool_config.container_image), :header => "ID of the container image" do |f| %>
<%= f.text_field :container_image_userfile_id %>
@@ -227,8 +227,9 @@
A specification can be either
a full path (e.g. file:/a/b/data.squashfs),
a path with a pattern (e.g. file:/a/b/data*.squashfs),
- a registered file identified by ID (e.g. userfile:123)
- or a SquashFS Data Provider identified by its ID or name (e.g. dp:123, dp:DpNameHere).
+ a registered file identified by ID (e.g. userfile:123),
+ a SquashFS Data Provider identified by its ID or name (e.g. dp:123, dp:DpNameHere)
+      or an ext3 capture overlay basename (e.g. ext3capture:basename=SIZE, where SIZE is a size such as 12G or 12M).
In the case of a Data Provider, the overlays will be the files that the provider uses.
Each overlay specification should be on a separate line.
You can add comments, indicated with hash symbol #.
@@ -244,6 +245,12 @@
-<%= @bourreau_glob_config.to_bash_prologue if @bourreau_glob_config %>
-<%= @tool_glob_config.to_bash_prologue if @tool_glob_config %>
-<%= @tool_local_config.to_bash_prologue if @tool_local_config %>
+
+<%= @bourreau_glob_config.to_bash_prologue @tool_local_config&.use_singularity? if @bourreau_glob_config %>
+<%= @tool_glob_config.to_bash_prologue @tool_local_config&.use_singularity? if @tool_glob_config %>
+<%= @tool_local_config.to_bash_prologue @tool_local_config&.use_singularity? if @tool_local_config %>
##########################################
#### [Wrapped commands would be here] ####
##########################################
diff --git a/BrainPortal/app/views/userfiles/_default_qc_panel.html.erb b/BrainPortal/app/views/userfiles/_default_qc_panel.html.erb
index cc68d01fd..20af26df6 100644
--- a/BrainPortal/app/views/userfiles/_default_qc_panel.html.erb
+++ b/BrainPortal/app/views/userfiles/_default_qc_panel.html.erb
@@ -23,13 +23,24 @@
-%>
<% if ! @userfile.is_locally_synced? %>
- This file must be synced locally to view QC data.
+
+ This file must be synced locally to view QC data.
+
<% else %>
- <% if @userfile.is_a?(ImageFile) %>
- <%= image_tag url_for(:action => :content, :id => @userfile.id) %>
+ <% div_id_to_replace = "sub_viewer_#{@userfile.id}" %>
+ <% link = data_link(@userfile.name, @userfile, div_id_to_replace) %>
+ <% if link =~ /data-url=/ %>
+
+ <%= link %>
+
+
<% else %>
- This file does not seem to contain any QC data, or no template is available to QC this file type.
+ This file does not seem to contain any QC data, or no template is available to QC this file type.
<% end %>
<% end %>
diff --git a/BrainPortal/app/views/userfiles/_quality_control_panel.html.erb b/BrainPortal/app/views/userfiles/_quality_control_panel.html.erb
index f7c436546..dee09e12e 100644
--- a/BrainPortal/app/views/userfiles/_quality_control_panel.html.erb
+++ b/BrainPortal/app/views/userfiles/_quality_control_panel.html.erb
@@ -36,10 +36,12 @@
<%= link_to_userfile_if_accessible(@userfile, current_user, :html_options => { :target => '_blank' }) %>
-<%= submit_tag "Update (and go to next file)", :name => :update %>
+<% update_message = "Update" %>
+<% update_message += " (and go to next file)" if not_last_file %>
+<%= submit_tag "#{update_message}", :name => :update, :data => {:current_file_id => @userfile.id } %>
diff --git a/BrainPortal/app/views/userfiles/show.html.erb b/BrainPortal/app/views/userfiles/show.html.erb
index fe3e43b73..ac315b56e 100644
--- a/BrainPortal/app/views/userfiles/show.html.erb
+++ b/BrainPortal/app/views/userfiles/show.html.erb
@@ -152,7 +152,7 @@
<% if @userfile.zenodo_doi.starts_with?( ZenodoHelper::ZenodoSandboxDOIPrefix ) %>
Published: <%= link_to_deposit(@userfile.zenodo_deposit_id) %>
<% else %>
- Published: <%= link_to_doi(@userfile.zenodo_doi) %>
+ Published: <%= link_to_zenodo_doi(@userfile.zenodo_doi) %>
<% end %>
<% elsif @userfile.zenodo_deposit_id.present? %>
In progress: <%= link_to_deposit(@userfile.zenodo_deposit_id) %>
@@ -204,26 +204,27 @@
<% if @userfile.archived? %>
+
<%= html_colorize("This #{@userfile.pretty_type} has been archived.", 'red') %>
Content viewers are disabled until the file is unarchived.
- <% else %>
- <% if ! @userfile.can_be_accessed_by?(current_user, :read) %>
+
+ <% elsif ! @userfile.can_be_accessed_by?(current_user, :read) %>
(This file cannot be viewed by you; I wonder how you got here.)
- <% elsif @userfile.data_provider.meta[:no_viewers] %>
+ <% elsif @userfile.data_provider.meta[:no_viewers] %>
(This file cannot be viewed as it is stored on Data Provider
<%= link_to_data_provider_if_accessible(@userfile.data_provider) %>
which is marked as non-viewable)
- <% elsif @userfile.data_provider.not_syncable? %>
+ <% elsif @userfile.data_provider.not_syncable? %>
(This file cannot be viewed as it is stored on Data Provider
<%= link_to_data_provider_if_accessible(@userfile.data_provider) %>
which is configured to not allow synchronization at all)
- <% elsif @sync_status == "Corrupted" %>
+ <% elsif @sync_status == "Corrupted" %>
(The content of this file seems to be corrupted. This might be the result
@@ -232,26 +233,20 @@
by a task, consider restarting the task's Post Processing stage.)
- <% elsif ! @userfile.data_provider.rr_allowed_syncing? %>
+ <% elsif ! @userfile.data_provider.rr_allowed_syncing? %>
(This file cannot be viewed as it is stored on Data Provider
<%= link_to_data_provider_if_accessible(@userfile.data_provider) %>
which is configured to not allow synchronization to this Portal)
- <% elsif (! @userfile.is_locally_synced?) && (! @userfile.data_provider.online?) %>
+ <% elsif (! @userfile.is_locally_synced?) && (! @userfile.data_provider.online?) %>
(This data is not currently synchronized and its Data Provider
<%= link_to_data_provider_if_accessible(@userfile.data_provider) %>
is offline, so its content is not viewable for the moment)
- <% elsif @userfile.is_locally_synced? && @userfile.viewers_with_applied_conditions.blank? %>
-
- (The contents of this file cannot be viewed: no viewer code available at this moment
- for files of type '<%= @userfile.pretty_type %>')
-
- <% else %>
+ <% elsif ! @userfile.is_locally_synced? %>
- <% if ! @userfile.is_locally_synced? %>
<% if @sync_status =~ /^To/ %>
(This data file is currently being synchronized. Wait a few seconds for this to complete)
<% else %>
@@ -260,8 +255,13 @@
to start the synchronization process.
This may allow you to view displayable content<% if @userfile.is_a?(FileCollection) %> and extract files from this collection<% end %>).
<% end %>
-
- <% end %>
+
+ <% elsif @userfile.viewers_with_applied_conditions.blank? %>
+
+ (The contents of this file cannot be viewed: no viewer code available at this moment
+ for files of type '<%= @userfile.pretty_type %>')
+
+ <% else %>
<% if @userfile.viewers_with_applied_conditions.size > 1 %>
Change view:
@@ -293,8 +293,9 @@
<% end %>
- <% end %>
+
<% end %>
+
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/boutiques_descriptors/new_multi_boutiques_demo.json b/BrainPortal/cbrain_plugins/cbrain-plugins-base/boutiques_descriptors/new_multi_boutiques_demo.json
index acc8e1269..c2d2ec4ef 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/boutiques_descriptors/new_multi_boutiques_demo.json
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/boutiques_descriptors/new_multi_boutiques_demo.json
@@ -133,7 +133,7 @@
"du_report_out": "TextFile"
},
"BoutiquesOutputFilenameRenamer": {
- "my_output_name": "sinput1"
+ "du_report_out": [ "sinput1", "my_output_name" ]
}
}
}
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/bash_scriptor/bourreau/bash_scriptor.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/bash_scriptor/bourreau/bash_scriptor.rb
index 5745079f0..4ae3f5457 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/bash_scriptor/bourreau/bash_scriptor.rb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/bash_scriptor/bourreau/bash_scriptor.rb
@@ -57,6 +57,7 @@ def setup #:nodoc:
def cluster_commands #:nodoc:
params = self.params
file_ids = params[:interface_userfile_ids] || []
+ File.unlink(self.stdout_cluster_filename) rescue nil # needed in case of retries
raw_text = params[:bash_script]
raw_text.tr!("\r","") # text areas have CRs in line terminators, yuk!
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/portal/boutiques_descriptor_maker_handler.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/portal/boutiques_descriptor_maker_handler.rb
index 86fffe623..3ab1847ce 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/portal/boutiques_descriptor_maker_handler.rb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/portal/boutiques_descriptor_maker_handler.rb
@@ -56,6 +56,7 @@ def descriptor_for_form
self.errors.add(:base, "Your descriptor has syntax errors")
desc_user_posted = self.descriptor_when_json_error
end
+ desc_user_posted.delete(:groups) if desc_user_posted.groups.blank?
added_input = self.boutiques_descriptor.input_by_id('_bdm_json_descriptor').dup
desc_user_posted.inputs.unshift(added_input)
desc_user_posted
@@ -76,6 +77,7 @@ def before_form
# there is nothing to launch.
def after_form
desc = descriptor_for_form
+
if self.errors.empty?
self.bosh_validation_messages = generate_validation_messages(desc)
if self.bosh_validation_messages.to_s.strip != "OK"
@@ -84,6 +86,16 @@ def after_form
self.bosh_command_preview = generate_command_preview(desc, self.invoke_params)
end
end
+
+ if self.errors.empty? && (params[:_bdm_reorder] == 'on' || params[:_bdm_pad] == 'on')
+ btq = descriptor_from_posted_form
+ btq = btq.pretty_ordered if params[:_bdm_reorder] == 'on'
+ btq.delete(:groups) if btq.groups.blank?
+ json = btq.super_pretty_json if params[:_bdm_pad] == 'on'
+ json ||= JSON.pretty_generate(btq)
+ self.invoke_params[:_bdm_json_descriptor] = json
+ end
+
if self.errors.empty?
# We must add at least one error to prevent CBRAIN from attempting to launch something.
self.errors.add(:base, <<-ALL_OK
@@ -93,6 +105,7 @@ def after_form
ALL_OK
)
end
+
""
end
@@ -116,6 +129,20 @@ def descriptor_from_posted_form #:nodoc:
text = descriptor_text_from_posted_form
return nil unless text
desc = BoutiquesSupport::BoutiquesDescriptor.new_from_string(text) rescue nil
+
+ # Check for something bosh doesn't verify: input IDs mentioned in groups
+ # that do not exist
+ zap_it = false
+ (desc&.groups || []).each do |group|
+ members = group.members || []
+ badid = members.detect { |inputid| (desc.input_by_id(inputid) rescue nil).nil? }
+ if badid
+ self.errors.add(:base, "The group '#{group.name}' has a member input id '#{badid}' which doesn't exist")
+ zap_it = true
+ end
+ end
+ desc = nil if zap_it
+
desc
end
@@ -131,7 +158,7 @@ def generate_validation_messages(desc) #:nodoc:
rescue => ex
return "Bosh validation failed: #{ex.class} #{ex.message}"
ensure
- File.unlink tmpfile
+ File.unlink(tmpfile) rescue nil
end
# Invokes bosh to generate a command preview.
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/views/_boutiques_preview.html.erb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/views/_boutiques_preview.html.erb
index 3776dff98..92844ed13 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/views/_boutiques_preview.html.erb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/boutiques_descriptor_maker_handler/views/_boutiques_preview.html.erb
@@ -26,6 +26,8 @@
<%- end -%>
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/diagnostics/bourreau/diagnostics.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/diagnostics/bourreau/diagnostics.rb
index b1e7a2289..46cdf29c7 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/diagnostics/bourreau/diagnostics.rb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/cbrain_task/diagnostics/bourreau/diagnostics.rb
@@ -168,11 +168,11 @@ def cluster_commands #:nodoc:
echo ""
echo "==== Host Info ===="
- uname -a
- uptime
+ uname -a 2>/dev/null
+ uptime 2>/dev/null
echo ""
- if test -n "$(type lsb_release)" ; then
+ if test -n "$(type -p lsb_release)" ; then
echo "==== LSB Release ===="
lsb_release -a
echo ""
@@ -185,8 +185,8 @@ def cluster_commands #:nodoc:
fi
if test -e /proc/cpuinfo ; then
- echo "==== Last CPU Info ===="
- cat /proc/cpuinfo | perl -ne '@x=grep(/./,<>);unshift(@y,pop(@x)) while @x > 0 && $y[0] !~ /^processor/; END { print @y }'
+ echo "==== Compacted CPU Info ===="
+ cat /proc/cpuinfo | sort | uniq | grep -v -E 'apicid|^processor|core id'
echo ""
fi
@@ -206,6 +206,10 @@ def cluster_commands #:nodoc:
ls -la
echo ""
+ echo "==== Listing Content of Work Directory With Dereferencing ===="
+ ls -laL
+ echo ""
+
_DIAGNOSTIC_COMMANDS_
file_ids.each do |id|
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/apptainer_image.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/apptainer_image.rb
new file mode 100644
index 000000000..8e77a7eb6
--- /dev/null
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/apptainer_image.rb
@@ -0,0 +1,59 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+# This model represents a single file containing an Apptainer container image.
+class ApptainerImage < FilesystemImage
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ has_viewer :name => 'Image Info', :partial => :info, :if => :is_viewable?
+
+ def self.file_name_pattern #:nodoc:
+ /\.s?img\z|\.sif\z/i
+ end
+
+ def is_viewable? #:nodoc:
+ if ! self.has_apptainer_support?
+ return [ "The local portal doesn't support inspecting Apptainer images." ]
+ elsif ! self.is_locally_synced?
+ return [ "Apptainer image file not yet synchronized" ]
+ else
+ true
+ end
+ end
+
+ def has_apptainer_support? #:nodoc:
+ self.class.has_apptainer_support?
+ end
+
+ # Detects if the system has the 'apptainer' command.
+ # Caches the result in the class so it won't need to
+ # be detected again after the first time, for the life
+ # of the current process.
+ def self.has_apptainer_support? #:nodoc:
+ return @_has_apptainer_support if ! @_has_apptainer_support.nil?
+ out = IO.popen("bash -c 'type -p apptainer'","r") { |f| f.read }
+ @_has_apptainer_support = out.present?
+ end
+
+end
+
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/views/_info.html.erb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/views/_info.html.erb
new file mode 100644
index 000000000..f3c3b07f5
--- /dev/null
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/apptainer_image/views/_info.html.erb
@@ -0,0 +1,47 @@
+
+<%
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+%>
+
+<% path = @userfile.cache_full_path.to_s.bash_escape %>
+<% cat = lambda { |com| IO.popen(com,"r") { |fh| fh.read } } %>
+
+
+
+<% end %>
+
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/cbrain_file_list.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/cbrain_file_list.rb
index 62139f5d7..aebfca32d 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/cbrain_file_list.rb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/cbrain_file_list.rb
@@ -96,7 +96,7 @@ def load_from_content(csv_file_content)
# as extracted by cached_csv_array(). IDs will be numeric, or for
# missing rows, will contain nils. IDs can be zero.
#
- # [ 12, 0, 45, nil nil, 433 ]
+ # [ 12, 0, 45, nil, nil, 433 ]
#
# Note that this method caches internally its result. To clear the
# cache (if the userfile's content has changed for instance) call
@@ -241,15 +241,14 @@ def flush_internal_caches
# for the array of +userfiles+. nil entries are allowed in +userfiles+
# and will be properly encoded as missing rows with ID set to 0.
def self.create_csv_file_from_userfiles(userfiles)
- userfile_model_hash = Userfile.columns_hash
text_rows = []
assoc_cache = {}
userfiles.each do |userfile|
row = []
if (userfile.nil?)
- row = [0] + Array.new(ATTRIBUTES_LIST.size - 1, "")
+ row = [0] + Array.new(self::ATTRIBUTES_LIST.size - 1, "")
else
- ATTRIBUTES_LIST.each do |att|
+ self::ATTRIBUTES_LIST.each do |att|
val = userfile.send(att) # attribute value in model; can be the id of an assoc
if att =~ /_id$/ # try to look up names in other models
assoc_cache[[att,val]] ||= ( userfile.send(att.to_s.sub(/_id$/,"")).try(att == :user_id ? :login : :name) || "-")
@@ -268,4 +267,33 @@ def self.create_csv_file_from_userfiles(userfiles)
csv_file
end
+ # This is like CbrainFileList.create!() but you must
+ # also provide :userfiles among the attributes; these
+ # userfiles will be stored as the content of the created
+ # CbrainFileList.
+ def self.create_with_userfiles!(attributes)
+ userfiles = attributes[:userfiles] || cb_error("Need some userfiles for CbrainFileList")
+ attlist = attributes.reject { |k,v| k.to_s == 'userfiles' }
+ cbfile = self.create!(attlist)
+ cbfile.set_userfiles(userfiles)
+ cbfile
+ end
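+
+  # A minimal usage sketch (the attribute values below are hypothetical,
+  # shown only to illustrate the calling convention):
+  #
+  #   cbfile = CbrainFileList.create_with_userfiles!(
+  #     :name             => "my_list.cbcsv",
+  #     :user_id          => user.id,
+  #     :group_id         => user.own_group.id,
+  #     :data_provider_id => dp.id,
+  #     :userfiles        => userfiles
+  #   )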
+
+ # Replace the content of the CbrainFileList with a new
+ # CbrainFileList representing +userfiles+. The content
+ # of the CSV will be immediately uploaded to the provider.
+ def set_userfiles(userfiles)
+ flush_internal_caches()
+ csv = self.class.create_csv_file_from_userfiles(userfiles)
+ self.cache_writehandle { |fh| fh.write csv }
+ self
+ end
+
+ private
+
+  # Can be redefined in subclasses.
+ def self.userfile_model_hash
+ Userfile.columns_hash
+ end
+
end
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/views/_cb_file_list.html.erb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/views/_cb_file_list.html.erb
index fe73edb0b..cdfa2695b 100644
--- a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/views/_cb_file_list.html.erb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/cbrain_file_list/views/_cb_file_list.html.erb
@@ -90,7 +90,7 @@
<% val = cvs_row[idx] %>
<% if att == :id %>
- <% cur_file = Userfile.find_all_accessible_by_user(current_user).where(:id => val).first %>
+ <% cur_file = Userfile.find_all_accessible_by_user(current_user, :access_requested => :read).where(:id => val).first %>
<%= val %> : <%= link_to_userfile_if_accessible(cur_file) %>
<% else %>
<%= val.nil? ? "-" : val %>
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/extended_cbrain_file_list/extended_cbrain_file_list.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/extended_cbrain_file_list/extended_cbrain_file_list.rb
new file mode 100644
index 000000000..f23587b18
--- /dev/null
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/extended_cbrain_file_list/extended_cbrain_file_list.rb
@@ -0,0 +1,131 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2012
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+# This class extends the ATTRIBUTES_LIST array used by
+# CbrainFileList by adding an extra key that corresponds
+# to the last column of the CSV file.
+#
+# Example of file content:
+#
+# 232123,"myfile.txt",425,"TextFile","MainStoreProvider","jsmith","mygroup","{extra_param_1: value_1}"
+# 112233,"plan.pdf",3894532,"SingleFile","SomeDP","jsmith","secretproject", "{extra_param_2: value_2}"
+# 0,,,,,,,
+# 933,"hello.txt",3433434,"TextFile","SomeDP","jsmith","mygroup","{extra_param_3: value_3}"
+#
+class ExtendedCbrainFileList < CbrainFileList
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # Structure of the CSV file; only the ID is used when this object is used as input to something else.
+ # When displayed in a web page, the associations to other models are shown by name.
+ ATTRIBUTES_LIST = superclass::ATTRIBUTES_LIST + [ :json_params ]
+
+ def self.pretty_type #:nodoc:
+ "Extended CBRAIN List of files"
+ end
+
+  # Returns an array of hashes extracted from the last column of the
+  # Extended CBCsv file, as read by cached_csv_array(). Each entry is
+  # a hash (possibly empty):
+ #
+ # [ {key_param_1_task_1: value_for_param_1_task_1, key_param_2_task_1: value_for_param_2_task_1},
+  #   {key_param_1_task_2: value_for_param_1_task_2},
+ # {},
+ # {},
+ # {key_param_1_task_5: value_for_param_1_task_5, key_param_2_task_5: value_for_param_2_task_5}
+ # ]
+ #
+ # Note that this method caches internally its result. To clear the
+ # cache (if the userfile's content has changed for instance) call
+ # the method flush_internal_caches().
+ #
+ def ordered_params()
+ json_params_idx = ATTRIBUTES_LIST.index(:json_params)
+ @extra_params ||= cached_csv_array.map do |row|
+ JSON.parse(row[json_params_idx])
+ end
+
+ @extra_params
+ end
+
+  # Many methods of this class cache their result internally
+  # to avoid redoing costly work. If the content of
+  # the CSV file changes, calling flush_internal_caches() will
+  # clear these caches so they return new, accurate results.
+ def flush_internal_caches
+ super
+ @extra_params = nil
+ end
+
+  # Given ["a/b/c", "a/d/e", "x/y/z", "x/w/a"],
+  # returns { "a" => ["a/b/c", "a/d/e"],
+  #           "x" => ["x/y/z", "x/w/a"]
+  #         }
+ def self.roots_to_fullpaths(relpaths)
+ # Special situation when a file with a path
+ # is specified instead of just a basename.
+ relpaths.inject({}) do |results,relpath|
+ filenames = Pathname.new(relpath).each_filename.to_a
+ # E.g: root == sub-123
+ parent_dir = filenames.first
+ res = results[parent_dir] ||= []
+ res << relpath if filenames.size != 1
+ results
+ end
+ end
+
+ # Add json_params reader method to userfile object
+ def self.extend_userfile_json_params_reader(userfile,json_params_value)
+ userfile.define_singleton_method(:json_params) {
+ json_params_value
+ }
+ end
+
+ # Extend each userfile with json_params_reader
+ def self.extended_userfiles_by_name(userfiles,id_to_values)
+ userfiles.to_a.each do |userfile|
+ extend_userfile_json_params_reader(userfile,id_to_values[userfile.name])
+ end
+ userfiles
+ end
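+
+  # A minimal usage sketch (hypothetical data), assuming +userfiles+
+  # contains a userfile named "hello.txt":
+  #
+  #   id_to_values = { "hello.txt" => { "extra_param_3" => "value_3" } }
+  #   files = ExtendedCbrainFileList.extended_userfiles_by_name(userfiles, id_to_values)
+  #   files.detect { |f| f.name == "hello.txt" }.json_params
+  #   #=> { "extra_param_3" => "value_3" }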
+
+  # Extends Userfile.columns_hash with a json_params key
+ def self.userfile_model_hash
+ extended_userfile_model_hash = Userfile.columns_hash.dup
+ extended_userfile_model_hash["json_params"].define_singleton_method(:type) { :hash }
+ extended_userfile_model_hash
+ end
+
+end
diff --git a/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/squashfs_file.rb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/squashfs_file.rb
new file mode 100644
index 000000000..ffb344be2
--- /dev/null
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/squashfs_file.rb
@@ -0,0 +1,62 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+# Model for filesystem files in SquashFS format.
+class SquashfsFile < SingleFile
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ has_viewer :name => 'SquashFS Filesystem', :partial => :squashfs_file, :if => :is_viewable?
+
+ def self.file_name_pattern #:nodoc:
+ /\.(sqs|squashfs|sqfs|sfs)\z/i
+ end
+
+ def self.pretty_type #:nodoc:
+ "SquashFS Filesystem File"
+ end
+
+ def is_viewable? #:nodoc:
+ if ! self.has_unsquashfs_support?
+ return [ "The local portal doesn't support inspecting SquashFS images." ]
+ elsif ! self.is_locally_synced?
+ return [ "The SquashFS image file is not yet synchronized" ]
+ else
+ true
+ end
+ end
+
+ def has_unsquashfs_support? #:nodoc:
+ self.class.has_unsquashfs_support?
+ end
+
+ # Detects if the system has the 'unsquashfs' command.
+ # Caches the result in the class so it won't need to
+ # be detected again after the first time, for the life
+ # of the current process.
+ def self.has_unsquashfs_support? #:nodoc:
+ return @_has_unsquashfs_support if ! @_has_unsquashfs_support.nil?
+ out = IO.popen("bash -c 'type -p unsquashfs'","r") { |f| f.read }
+ @_has_unsquashfs_support = out.present?
+ end
+
+end
diff --git a/BrainPortal/app/views/service/support.html.erb b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/views/_squashfs_file.html.erb
similarity index 64%
rename from BrainPortal/app/views/service/support.html.erb
rename to BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/views/_squashfs_file.html.erb
index ded686517..fccb6e610 100644
--- a/BrainPortal/app/views/service/support.html.erb
+++ b/BrainPortal/cbrain_plugins/cbrain-plugins-base/userfiles/squashfs_file/views/_squashfs_file.html.erb
@@ -3,7 +3,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -22,15 +22,15 @@
#
-%>
-<% title 'Service Support' %>
+<% path = @userfile.cache_full_path.to_s.bash_escape %>
+<% cat = lambda { |com| IO.popen(com,"r") { |fh| fh.read } } %>
+<% max_list = 300 %>
-
SquashFS File List (first <%= max_list %> entries only)
+
+
<%= cat.("unsquashfs -p 1 -lls #{path} | head -#{max_list}") %>
diff --git a/BrainPortal/config/console_rc/lib/interactive_bourreau_control.rb b/BrainPortal/config/console_rc/lib/interactive_bourreau_control.rb
index ada09bed3..d791343b0 100644
--- a/BrainPortal/config/console_rc/lib/interactive_bourreau_control.rb
+++ b/BrainPortal/config/console_rc/lib/interactive_bourreau_control.rb
@@ -20,6 +20,8 @@
# along with this program. If not, see .
#
+require 'reline' # Readline.get_screen_size fails me
+
# We need some sort of constant to refer to the console's
# context, which has access to all the pretty helpers etc.
ConsoleCtx = self # also in pretty_view.rb in the same directory
@@ -56,7 +58,7 @@ def initialize(bourreaux_list = Bourreau.order(:id).all, term_width = nil)
@bourreaux = bourreaux_list
@width = term_width
if term_width.blank? || term_width.to_i < 1
- _,numcols = Readline.get_screen_size rescue [25,120]
+ _,numcols = Reline.get_screen_size rescue [25,120]
@width = numcols
end
@selected = {}
@@ -113,7 +115,7 @@ def interactive_control(initial_command = nil)
OPERATIONS
userinput = initial_command.presence
- userinput ||= Readline.readline("Do something (h for help): ",false)
+ userinput ||= Reline.readline("Do something (h for help): ",false)
userinput = "Q" if userinput.nil?
inputkeywords = userinput.downcase.split(/\W+/).map(&:presence).compact
@@ -124,7 +126,7 @@ def interactive_control(initial_command = nil)
end
puts ""
if dowait && initial_command.blank?
- Readline.readline("Press RETURN to continue: ",false)
+ Reline.readline("Press RETURN to continue: ",false)
puts ""
end
initial_command = nil
@@ -320,7 +322,12 @@ def process_user_letter(letter) #:nodoc:
uptime &&= ConsoleCtx.send(:pretty_elapsed, uptime, :num_components => 2)
uptime &&= "up for #{uptime}"
uptime ||= "DOWN"
- printf "%#{max_size}s rev %-9.9s %s, %d/%d workers\n", bou.name, gitrev, uptime, numworkers, expworkers
+ color_on = color_off = nil
+ color_on = "\e[31m" if uptime == 'DOWN' # RED for down bourreaux
+ color_on ||= "\e[33m" if numworkers != expworkers # YELLOW for missing workers
+ color_on ||= "\e[32m" # GREEN when everything ok
+ color_off = "\e[0m" if color_on
+ printf "#{color_on}%#{max_size}s rev %-9.9s %s, %d/%d workers#{color_off}\n", bou.name, gitrev, uptime, numworkers, expworkers
end
end
return true
@@ -352,7 +359,7 @@ def process_user_letter(letter) #:nodoc:
puts " * @r@ will be substituted by the Bourreau's RAILS root path"
puts " * @d@ will be substituted by the Bourreau's DP cache dir path"
puts " * @g@ will be substituted by the Bourreau's gridshare dir path"
- comm = Readline.readline("Bash command: ")
+ comm = Reline.readline("Bash command: ")
bash_command_on_bourreaux(comm)
return true
end
diff --git a/BrainPortal/config/console_rc/lib/reports.rb b/BrainPortal/config/console_rc/lib/reports.rb
index 97dccd0d5..1e674fd12 100644
--- a/BrainPortal/config/console_rc/lib/reports.rb
+++ b/BrainPortal/config/console_rc/lib/reports.rb
@@ -54,6 +54,11 @@ def acttasks(tasks = CbrainTask.active.all)
result
end
+ # Remove column c_types if it's empty everywhere
+ if list1.all? { |struct| struct[:c_types].blank? }
+ list1.each { |struct| struct.delete(:c_types) }
+ end
+
# Remove duplicates from list1 and count them
seen={}
list2 = list1.select { |r| seen[r] ||= 0 ; seen[r] += 1 ; seen[r] == 1 }
diff --git a/BrainPortal/config/initializers/cbrain.rb b/BrainPortal/config/initializers/cbrain.rb
index 8a7391f74..478881eaa 100644
--- a/BrainPortal/config/initializers/cbrain.rb
+++ b/BrainPortal/config/initializers/cbrain.rb
@@ -95,6 +95,9 @@ def self.spawn_with_active_records(destination = nil, taskname = 'Internal Backg
reader,writer = IO.pipe # The stream that we use to send the subchild's pid to the parent
childpid = Kernel.fork do
+ # Need to properly tell MySQL/MariaDB that we're disconnecting
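+      # (otherwise the forked child keeps sharing the parent's database
+      # socket, and closing it abruptly at exit can break the parent's
+      # own connection)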
+ ApplicationRecord.connection.disconnect! rescue nil
+
# Child code starts here
reader.close # Not needed in the child!
@@ -198,6 +201,9 @@ def self.spawn_fully_independent(taskname = 'Independent Background Task')
reader,writer = IO.pipe # The stream that we use to send the subchild's pid to the parent
childpid = Kernel.fork do
+ # Need to properly tell MySQL/MariaDB that we're disconnecting
+ ApplicationRecord.connection.disconnect! rescue nil
+
# Child code starts here
reader.close # Not needed in the child!
diff --git a/BrainPortal/config/initializers/cors.rb b/BrainPortal/config/initializers/cors.rb
new file mode 100644
index 000000000..4420121d9
--- /dev/null
+++ b/BrainPortal/config/initializers/cors.rb
@@ -0,0 +1,12 @@
+
+Rails.application.config.middleware.insert_before 0, Rack::Cors do
+ allow do
+ # This is a dummy configuration.
+ # Adjust as needed.
+ origins 'https://example.com:8888'
+ resource '/doesnotexist',
+ :headers => :any,
+ :methods => [:get]
+ end
+end
+
diff --git a/BrainPortal/config/initializers/validation_portal.rb b/BrainPortal/config/initializers/validation_portal.rb
index 1df6e1d5b..8244286fc 100644
--- a/BrainPortal/config/initializers/validation_portal.rb
+++ b/BrainPortal/config/initializers/validation_portal.rb
@@ -83,8 +83,8 @@
#
# Rake Exceptions By First Argument
#
- skip_validations_for = [ /^db:/, /^cbrain:plugins/, /^cbrain:test/, /^route/, /^assets/, /^cbrain:nagios/ ]
- first_arg = ARGV.detect { |x| x =~ /^[\w:]+$/i } # first thing that looks like abc:def:ghi
+ skip_validations_for = [ /^db:/, /^cbrain:plugins/, /^cbrain:test/, /^route/, /^assets/, /^cbrain:nagios/, /^cbrain:boutiques:rewrite/ ]
+ first_arg = ARGV.detect { |x| x =~ /^[\w:]+/i } # first thing that looks like abc:def:ghi
first_arg ||= '(none)'
if skip_validations_for.any? { |p| first_arg =~ p }
#------------------------------------------------------------------------------
diff --git a/BrainPortal/config/routes.rb b/BrainPortal/config/routes.rb
index 9b0605940..1a283d135 100644
--- a/BrainPortal/config/routes.rb
+++ b/BrainPortal/config/routes.rb
@@ -2,7 +2,7 @@
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -41,7 +41,11 @@
resources :custom_filters, :except => [ :index ]
resources :tags, :except => [ :new, :edit ]
resources :access_profiles, :except => [ :edit ]
- resources :disk_quotas, :only => [ :new, :index, :show, :create, :destroy, :update ]
+ resources :disk_quotas, :only => [ :new, :index, :show, :create, :destroy, :update ] do
+ collection do
+ get 'report'
+ end
+ end
# Standard CRUD resources, with extra actions
@@ -111,10 +115,13 @@
get 'report'
post 'report'
post 'repair'
+ post 'check_personal'
end
collection do
get 'dp_access'
get 'dp_transfers'
+ get 'new_personal'
+ post 'create_personal'
end
end
@@ -195,6 +202,7 @@
get '/about_us' => 'portal#about_us'
get '/available' => 'portal#available'
get '/search' => 'portal#search'
+ get '/stats' => 'portal#stats'
get '/login' => 'sessions#new'
get '/logout' => 'sessions#destroy'
get '/session_status' => 'sessions#show'
@@ -258,26 +266,6 @@
- ####################################################################################
- # Service; most of these actions are only needed
- # for the CANARIE monitoring system, and are therefore
- # shipped disabled by default, because it's not needed
- # anywhere else.
- ####################################################################################
- #get '/platform/info', :controller => :service, :action => :info
- #get '/platform/stats', :controller => :service, :action => :stats
- #get '/platform/detailed_stats', :controller => :service, :action => :detailed_stats
- #get '/platform/doc', :controller => :service, :action => :doc
- #get '/platform/releasenotes', :controller => :service, :action => :releasenotes
- #get '/platform/support', :controller => :service, :action => :support
- #get '/platform/source', :controller => :service, :action => :source
- #get '/platform/tryme', :controller => :service, :action => :tryme
- #get '/platform/licence', :controller => :service, :action => :licence
- #get '/platform/provenance', :controller => :service, :action => :provenance
- #get '/platform/factsheet', :controller => :service, :action => :factsheet
-
-
-
####################################################################################
# NeuroHub routes
####################################################################################
diff --git a/BrainPortal/data_dumps/README.md b/BrainPortal/data_dumps/README.md
index 50bebafd8..09bde1c62 100644
--- a/BrainPortal/data_dumps/README.md
+++ b/BrainPortal/data_dumps/README.md
@@ -24,5 +24,33 @@ resources that no longer exist.
The `reload` task requires a timestamp in argument
(e.g. `2021-12-31T120856`).
+### Standard regular maintenance
+
+On a system with a large amount of activity, a regular cleanup
+of the ResourceUsage table is necessary. The process is performed
+in two steps:
+
+First, dump all ResourceUsage objects that refer to objects
+that no longer exist in the database, and remove them from
+the database:
+
+```
+ RAILS_ENV=something rake cbrain:resource_usage:dump[DESTROY_ALL,no]
+```
+
+Second, re-insert monthly summaries of all removed records so that
+total historical usage by users is maintained:
+
+```
+ RAILS_ENV=something rake cbrain:resource_usage:monthly[All]
+```
+
+Note that this last step will re-create all monthly summaries
+cumulatively using the info in all previous YAML dumps. This
+rake task can safely be run multiple times; it will not duplicate
+summary information.
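+
+For example, a periodic maintenance job could chain the two steps
+(the `production` environment name here is illustrative):
+
+```
+ RAILS_ENV=production rake cbrain:resource_usage:dump[DESTROY_ALL,no]
+ RAILS_ENV=production rake cbrain:resource_usage:monthly[All]
+```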
+
+### See also
+
See the file [BrainPortal/lib/tasks/resource_usage_serialization.rake](https://github.com/aces/cbrain/blob/master/BrainPortal/lib/tasks/resource_usage_serialization.rake)
for more information about the rake tasks.
diff --git a/BrainPortal/db/migrate/20221007094232_remove_old_remote_ressources_attributes.rb b/BrainPortal/db/migrate/20221007094232_remove_old_remote_ressources_attributes.rb
new file mode 100644
index 000000000..53c5d7fae
--- /dev/null
+++ b/BrainPortal/db/migrate/20221007094232_remove_old_remote_ressources_attributes.rb
@@ -0,0 +1,7 @@
+class RemoveOldRemoteRessourcesAttributes < ActiveRecord::Migration[5.0]
+ def change
+ remove_column :remote_resources, :nh_email_delivery_options, :text, :after => :nh_system_from_email
+ remove_column :remote_resources, :tunnel_mysql_port, :integer
+ remove_column :remote_resources, :tunnel_actres_port, :integer
+ end
+end
diff --git a/BrainPortal/db/migrate/20230304184206_add_short_task_workdir_to_tool_configs.rb b/BrainPortal/db/migrate/20230304184206_add_short_task_workdir_to_tool_configs.rb
new file mode 100644
index 000000000..d8a023571
--- /dev/null
+++ b/BrainPortal/db/migrate/20230304184206_add_short_task_workdir_to_tool_configs.rb
@@ -0,0 +1,28 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+class AddShortTaskWorkdirToToolConfigs < ActiveRecord::Migration[5.0]
+ def change
+ add_column :tool_configs, :singularity_use_short_workdir, :boolean,
+ :default => false, :null => false, :after => :singularity_overlays_specs
+ end
+end
diff --git a/BrainPortal/db/migrate/20230418205141_add_unique_sync_status_index.rb b/BrainPortal/db/migrate/20230418205141_add_unique_sync_status_index.rb
new file mode 100644
index 000000000..3b4c59691
--- /dev/null
+++ b/BrainPortal/db/migrate/20230418205141_add_unique_sync_status_index.rb
@@ -0,0 +1,10 @@
+class AddUniqueSyncStatusIndex < ActiveRecord::Migration[5.0]
+ def up
+ remove_index :sync_status, [ :userfile_id, :remote_resource_id ]
+ add_index :sync_status, [ :userfile_id, :remote_resource_id ], :unique => true
+ end
+
+ def down
+ remove_index :sync_status, [ :userfile_id, :remote_resource_id ]
+ end
+end
diff --git a/BrainPortal/db/schema.rb b/BrainPortal/db/schema.rb
index 3ce3e9c33..bdb3f4dc2 100644
--- a/BrainPortal/db/schema.rb
+++ b/BrainPortal/db/schema.rb
@@ -10,7 +10,7 @@
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20220913183448) do
+ActiveRecord::Schema.define(version: 20230418205141) do
create_table "access_profiles", force: :cascade, options: "ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci" do |t|
t.string "name", null: false
@@ -263,8 +263,6 @@
t.string "ssh_control_host"
t.integer "ssh_control_port"
t.string "ssh_control_rails_dir"
- t.integer "tunnel_mysql_port"
- t.integer "tunnel_actres_port"
t.string "cache_md5"
t.boolean "portal_locked", default: false, null: false
t.integer "cache_trust_expire", default: 0
@@ -290,7 +288,6 @@
t.text "email_delivery_options", limit: 65535
t.string "nh_support_email"
t.string "nh_system_from_email"
- t.text "nh_email_delivery_options", limit: 65535
t.string "external_status_page_url"
t.string "docker_executable_name"
t.string "singularity_executable_name"
@@ -411,7 +408,7 @@
t.datetime "accessed_at"
t.datetime "synced_at"
t.index ["remote_resource_id"], name: "index_sync_status_on_remote_resource_id", using: :btree
- t.index ["userfile_id", "remote_resource_id"], name: "index_sync_status_on_userfile_id_and_remote_resource_id", using: :btree
+ t.index ["userfile_id", "remote_resource_id"], name: "index_sync_status_on_userfile_id_and_remote_resource_id", unique: true, using: :btree
t.index ["userfile_id"], name: "index_sync_status_on_userfile_id", using: :btree
end
@@ -441,12 +438,12 @@
create_table "tool_configs", force: :cascade, options: "ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci" do |t|
t.string "version_name"
- t.text "description", limit: 65535
+ t.text "description", limit: 65535
t.integer "tool_id"
t.integer "bourreau_id"
- t.text "env_array", limit: 65535
- t.text "script_prologue", limit: 65535
- t.text "script_epilogue", limit: 65535
+ t.text "env_array", limit: 65535
+ t.text "script_prologue", limit: 65535
+ t.text "script_epilogue", limit: 65535
t.datetime "created_at"
t.datetime "updated_at"
t.integer "group_id"
@@ -463,9 +460,10 @@
t.string "containerhub_image_name"
t.string "container_engine"
t.string "container_index_location"
- t.text "singularity_overlays_specs", limit: 65535
+ t.text "singularity_overlays_specs", limit: 65535
+ t.boolean "singularity_use_short_workdir", default: false, null: false
t.string "container_exec_args"
- t.boolean "inputs_readonly", default: false
+ t.boolean "inputs_readonly", default: false
t.string "boutiques_descriptor_path"
t.index ["bourreau_id"], name: "index_tool_configs_on_bourreau_id", using: :btree
t.index ["tool_id"], name: "index_tool_configs_on_tool_id", using: :btree
diff --git a/BrainPortal/lib/boutiques_boot_integrator.rb b/BrainPortal/lib/boutiques_boot_integrator.rb
index fcd3fe1de..6b3dd9b49 100644
--- a/BrainPortal/lib/boutiques_boot_integrator.rb
+++ b/BrainPortal/lib/boutiques_boot_integrator.rb
@@ -95,7 +95,7 @@ def self.link_from_json_file(path)
# This method scans a directory for JSON boutiques descriptors and
# loads them all.
def self.link_all(dir = CBRAIN::BoutiquesDescriptorsPlugins_Dir)
- jsons=Dir.glob(Pathname.new(dir) + "*.json")
+ jsons=Dir.glob(Pathname.new(dir) + "*.json").sort
jsons.each do |json|
self.link_from_json_file(json)
end
diff --git a/BrainPortal/lib/boutiques_collection_basenames_list_maker.rb b/BrainPortal/lib/boutiques_collection_basenames_list_maker.rb
new file mode 100644
index 000000000..81e140e79
--- /dev/null
+++ b/BrainPortal/lib/boutiques_collection_basenames_list_maker.rb
@@ -0,0 +1,98 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+#
+
+# This module extracts patterns based on a regex
+# specified in the descriptor by:
+#
+# "cbrain:integrator_modules": {
+# "BoutiquesCollectionBasenamesListMaker": [ "input_id", "FileCollection", "A regex to extract prefix from the file name" ]
+# }
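+#
+# For example (the values below are purely illustrative), to offer one
+# choice per 'sub-' prefix found among the file names of the selected
+# FileCollection, with the prefix captured by the regex's first group:
+#
+# "cbrain:integrator_modules": {
+#     "BoutiquesCollectionBasenamesListMaker": [ "subject_prefix", "FileCollection", "^(sub-[a-zA-Z0-9]+)" ]
+# }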
+#
+# It is possible to specify a FileCollection type in order to know which
+# input file the regex should be applied to.
+#
+# The extracted names will populate the input with value-choices
+# according to what was extracted.
+#
+# Then one task will be created for each choice.
+#
+# *Note*: This module is not usable with a Cbcsv file.
+# If it is used with a Cbcsv file, the Cbcsv file itself will be the input file
+# and one task will be created per sample.
+#
+# It should be usable with an ExtendedCbcsv file, though.
+#
+module BoutiquesCollectionBasenamesListMaker
+
+ # Note: to access the revision info of the module,
+  # you need to access the constant directly; the
+ # object method revision_info() won't work.
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # If more than one choice is detected in the FileCollection,
+  # the input identified by input_id will be populated with the prefixes
+  # extracted by the regex specified in the descriptor.
+ def descriptor_for_form #:nodoc:
+ descriptor = super.dup()
+ input_id, type, regex_string, = descriptor.custom_module_info('BoutiquesCollectionBasenamesListMaker')
+
+    # Get the userfile_ids from the params.
+    # Return immediately if there is not exactly one file.
+ userfile_ids = params["interface_userfile_ids"] || []
+ type_class = type.constantize
+ userfiles = Userfile.find(userfile_ids).select{|x| x.is_a?(type_class)}
+ return descriptor if userfiles.count != 1
+
+    # Fill the input with the list of prefixes
+ regex = Regexp.new(regex_string)
+ userfile = userfiles.first
+ file_names = userfile.provider_collection_index(:top, :regular).map(&:name)
+ file_names.map! {|x| Pathname.new(x).basename.to_s }
+ input = descriptor.input_by_id(input_id)
+
+ input["value-choices"] = file_names.map do |f_n|
+ f_n.match(regex) && Regexp.last_match[1]
+ end.compact.uniq
+
+ descriptor
+ end
+
+ # One task will be created per value of
+ # the input specified in the descriptor.
+ def final_task_list #:nodoc:
+ descriptor = self.descriptor_for_final_task_list
+ input_id, _, _ = descriptor.custom_module_info('BoutiquesCollectionBasenamesListMaker')
+
+ params_values = self.invoke_params[input_id]
+ return super if params_values.blank? || params_values.size == 1
+
+ # Create one task for each value
+ params_values.map do |value|
+ task = self.dup
+ task.description = task.description || ""
+ task.description += "\n\nRun with value: #{value}, for input #{input_id}."
+ task.invoke_params[input_id] = [value]
+ task
+ end
+
+ end
+ end
diff --git a/BrainPortal/lib/boutiques_ext3_capturer.rb b/BrainPortal/lib/boutiques_ext3_capturer.rb
new file mode 100644
index 000000000..9b9bf15fa
--- /dev/null
+++ b/BrainPortal/lib/boutiques_ext3_capturer.rb
@@ -0,0 +1,65 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This module adds automatic setup of mounted
+# ext3 filesystems as subdirectories of a task, provided
+# the tool runs in Singularity/Apptainer.
+# It is the exact equivalent of adding an ext3 overlay
+# configuration entry in the task's tool config.
+#
+# To include the module automatically at boot time
+# in a task integrated by Boutiques, add a new entry
+# in the 'custom' section of the descriptor, like this:
+#
+# "custom": {
+# "cbrain:integrator_modules": {
+# "BoutiquesExt3Capturer": {
+# "work": "50g",
+# "tmpdir": "20m"
+# }
+# }
+# }
+#
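+# With the example above ("work" and "tmpdir" being the mount basenames and
+# "50g" and "20m" the requested filesystem sizes), the task would get two
+# ext3 filesystems mounted under its work directory, exactly as if the same
+# pairs had been listed in the ext3 overlay section of its ToolConfig.
+#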
+module BoutiquesExt3Capturer
+
+ # Note: to access the revision info of the module,
+ # you need to access the constant directly, the
+ # object method revision_info() won't work.
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # Override the default behavior by adding new entries directly
+ # from the descriptor.
+ def ext3capture_basenames
+ # Get standard list as described in tool config
+ initial_list = super.dup # [ [ basename, size], [basename, size], ... ]
+
+ # Get values in descriptor, as a hash
+ descriptor = self.descriptor_for_cluster_commands
+ ext3_specs = descriptor.custom_module_info('BoutiquesExt3Capturer')
+
+ # Append our own entries; note that duplications of basenames
+ # will mean only the first entry is used!
+ initial_list + ext3_specs.to_a # the .to_a transforms the hash into an array of pairs.
+ end
+
+end
+
diff --git a/BrainPortal/lib/boutiques_forced_output_browse_path.rb b/BrainPortal/lib/boutiques_forced_output_browse_path.rb
index 383d2b675..e86f7980c 100644
--- a/BrainPortal/lib/boutiques_forced_output_browse_path.rb
+++ b/BrainPortal/lib/boutiques_forced_output_browse_path.rb
@@ -75,8 +75,25 @@ def name_and_type_for_output_file(output, pathname)
config = descriptor.custom_module_info('BoutiquesForcedOutputBrowsePath')
browse_path = config[output.id] # "a/b/c"
return [ name, type ] if browse_path.blank? # no configured browse_path for this output
+ browse_path = apply_value_keys(browse_path) # replaces [XYZ] strings with values from params
combined = (Pathname.new(browse_path) + name).to_s # "a/b/c/name"
[ combined, type ]
end
+ # Returns a modified version of browse_path where the
+ # value-key substrings [XYZ] are replaced by the corresponding
+ # values from the invoke structure.
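+ # For example (hypothetical names), a configured browse_path of
+ # "derivatives/[SUBJECT_ID]" with an invoke value of "sub-123" for the
+ # corresponding input would become "derivatives/sub-123".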
+ def apply_value_keys(browse_path)
+ descriptor = self.descriptor_for_save_results
+
+ # Prepare the substitution hash
+ substitutions_by_token = descriptor.build_substitutions_by_tokens_hash(
+ JSON.parse(File.read(self.invoke_json_basename))
+ )
+
+ new_browse_path = descriptor.apply_substitutions(browse_path, substitutions_by_token)
+
+ new_browse_path
+ end
+
end
diff --git a/BrainPortal/lib/boutiques_input_value_fixer.rb b/BrainPortal/lib/boutiques_input_value_fixer.rb
new file mode 100644
index 000000000..dee765ea2
--- /dev/null
+++ b/BrainPortal/lib/boutiques_input_value_fixer.rb
@@ -0,0 +1,157 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2024
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This module allows one to fix some input parameters to specific constant values.
+# The fixed input(s) will no longer be shown to the user in the form.
+# Optional inputs assigned a null value will be removed entirely
+# (do not use null with mandatory input parameters).
+#
+# In the descriptor, the spec would look like:
+#
+# "custom": {
+# "cbrain:integrator_modules": {
+# "BoutiquesInputValueFixer": {
+# "n_cpus": 1,
+# "mem": "4G",
+# "optional_custom_query": null,
+# "level": "group"
+# }
+# }
+# }
+#
+# Our main use case is resource-related parameters, which seldom participate
+# in dependencies and constraints.
+# Therefore we remove parameters from the form in a straightforward fashion
+# and do not address indirect or transitive dependencies. For instance,
+# if i1-requires->i2-requires->i3 and i2 is deleted, the dependency
+# of i3 on i1 will no longer be reflected dynamically in the web form UI.
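+#
+# With the example above, "n_cpus", "mem" and "level" would disappear from
+# the form and be submitted with the given constant values, while
+# "optional_custom_query" (null) would be removed from both the form and
+# the command line entirely.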
+module BoutiquesInputValueFixer
+
+ # Note: to access the revision info of the module,
+ # you need to access the constant directly, the
+ # object method revision_info() won't work.
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+
+ # The hash of input parameter values to be fixed or, if the value is null, to be omitted.
+ def fixed_values
+ self.boutiques_descriptor.custom_module_info('BoutiquesInputValueFixer')
+ end
+
+ # Deletes fixed inputs listed in the custom 'integrator_modules' section.
+ def descriptor_without_fixed_inputs(descriptor)
+ # Input parameters marked by null values will be excluded from the command line;
+ # others will be given fixed values during execution. Neither should appear in the web form UI.
+
+ fixed_input_ids = fixed_values.keys
+ descriptor_dup = descriptor.dup
+ fully_removed = fixed_input_ids.select do |i_id| # these variables are flagged to be removed rather than assigned a value
+ # in the spec, so they will be treated slightly differently
+ input = descriptor_dup.input_by_id(i_id)
+ value = fixed_values[i_id]
+ value.nil? || (input.type == 'Flag') && (value.presence.to_s.strip =~ /0|null|false/i || value.blank?)
+ end
+
+ # Generally speaking, Boutiques input groups can have three different constraints.
+ # Here we address 1) the mutually-exclusive constraint, which is the only one present in the GUI javascript (the rest are evaluated
+ # after submission of the form), and 2) the 'one-is-required' constraint, which affects the initial rendering of the form
+ # (though IMHO red stars or other indicators to draw user attention should eventually be implemented).
+
+ descriptor_dup.groups.each do |g| # filter groups, relax restriction to ensure that form can still be submitted
+ members = g.members - fixed_input_ids
+
+ # take action when at least some group members are actually assigned values rather than deleted
+ if (fixed_input_ids & g.members - fully_removed).present?
+ # since one input parameter is already selected permanently (fixed),
+ # we can drop one_is_required constraint
+ g.one_is_required = false # as a result, the group's checkbox is unselected when the form renders
+
+ # As one of the mutually exclusive parameters is selected by setting a fixed value,
+ # the rest of the group should be disabled.
+ # Whenever deleting all remaining parameters of the group is preferred to disabling them,
+ # the Boutiques author/admin can modify the list of fixed values accordingly.
+ block_inputs(descriptor_dup, members) if g.mutually_exclusive
+ g.mutually_exclusive = false # will make form's javascript smaller/faster
+
+ # The all-or-none constraint is seldom used, does not affect the form itself,
+ # is only validated after the form submission,
+ # and generally presents fewer pitfalls.
+ # Therefore, at the moment, the 'all-or-none' constraint is not addressed here.
+
+ end
+ g.members = members
+ end
+
+ # remove empty groups
+ descriptor_dup.groups = descriptor_dup.groups.select {|g| g.members.present? }
+
+ # delete fixed inputs
+ descriptor_dup.inputs = descriptor_dup.inputs.select { |i| ! fixed_values.key?(i.id) } # filter out fixed inputs
+
+ # Straightforward deletion of fixed inputs from dependencies.
+ # Indirect and transitive dependencies may be lost in the UI
+ # but will be validated after form submission.
+ descriptor_dup.inputs.each do |i|
+ i.requires_inputs = i.requires_inputs - fixed_input_ids if i.requires_inputs.present?
+ i.disables_inputs = i.disables_inputs - fixed_input_ids if i.disables_inputs.present?
+ i.value_requires.each { |v, a| i.value_requires[v] -= fixed_input_ids } if i.value_requires.present?
+ i.value_disables.each { |v, a| i.value_disables[v] -= fixed_input_ids } if i.value_disables.present?
+ end
+
+ descriptor_dup
+ end
+
+ # This blocks an input parameter by 'self-disabling' it, rather than explicitly deleting it.
+ # It is a bit unorthodox, yet expected to be needed only seldom.
+ def block_inputs(descriptor, input_ids)
+ input_ids.each do |input_id|
+ input = descriptor.input_by_id(input_id) rescue next
+ input.disables_inputs ||= []
+ input.disables_inputs |= [input_id]
+ input.name += " ( unavailable )"
+ end
+ end
+
+ # Adjusts the descriptor to allow checking the number of supplied files.
+ def descriptor_for_before_form
+ descriptor_without_fixed_inputs(super)
+ end
+
+ # Prevents showing/submitting fixed inputs in the form.
+ def descriptor_for_form
+ descriptor_without_fixed_inputs(super)
+ end
+
+ # show all the params
+ def descriptor_for_show_params
+ self.invoke_params.merge!(fixed_values) # shows 'fixed' parameters; the user will not be able to edit them
+ super # standard values
+ end
+
+ # Validation step: uses the original Boutiques descriptor with the combined invocation, for the greatest accuracy.
+ # Note: error messages might mention fixed variables.
+ def after_form
+ self.invoke_params.merge!(fixed_values.compact) # put back fixed values into invocation, if needed
+ super # Performs standard processing
+ end
+
+end
diff --git a/BrainPortal/lib/boutiques_output_cache_cleaner.rb b/BrainPortal/lib/boutiques_output_cache_cleaner.rb
index 0e1311e82..24f2dacf1 100644
--- a/BrainPortal/lib/boutiques_output_cache_cleaner.rb
+++ b/BrainPortal/lib/boutiques_output_cache_cleaner.rb
@@ -57,12 +57,16 @@ def descriptor_with_special_input(descriptor)
new_input = BoutiquesSupport::Input.new(
"name" => "Enable Output Cache Cleaning",
"id" => "cbrain_enable_output_cache_cleaner",
- "description" => "If set, the cached content of produced outputs are erased when the task completes successfuly.",
+ "description" => <<-DESC,
+ If set, the cached content of produced outputs is erased when the task completes successfully.
+ This does not affect the actual outputs of the task, only their cached content on the execution server.
+ Turn off this option only if you need a copy of the outputs on the server, e.g. for further processing.
+ DESC
"type" => "Flag",
"optional" => false,
"default-value" => true,
)
- descriptor.inputs <<= new_input
+ descriptor.inputs << new_input
# Add new group with that input
groups = descriptor.groups || []
@@ -76,7 +80,7 @@ def descriptor_with_special_input(descriptor)
)
groups << cb_mod_group
end
- cb_mod_group.members <<= new_input.id
+ cb_mod_group.members << new_input.id
descriptor.groups = groups
descriptor
diff --git a/BrainPortal/lib/boutiques_post_processing_cleaner.rb b/BrainPortal/lib/boutiques_post_processing_cleaner.rb
index a0f75272e..fd1f630b2 100644
--- a/BrainPortal/lib/boutiques_post_processing_cleaner.rb
+++ b/BrainPortal/lib/boutiques_post_processing_cleaner.rb
@@ -37,10 +37,17 @@
# "work",
# "*.tmp",
# "[OUTFILE_NAME].*.work"
-# [
+# ]
# }
# }
#
+# This module will also erase EXT3 capture filesystems created by CBRAIN
+# if the basename of the filesystem, as configured in the ToolConfig, matches
+# one of the entries in this module's configuration. So in the example above,
+# the content of the file ".capt_work.ext3" would also be erased if a capture
+# filesystem was configured for "work". Patterns are not supported for
+# this feature.
+#
module BoutiquesPostProcessingCleaner
# Note: to access the revision info of the module,
@@ -81,6 +88,15 @@ def save_results
end
end
+ # Also erase ext3 capture files IF they match one of the patterns
+ ext3capture_basenames.each do |basename, _|
+ next unless patterns.include?(basename) # must be exact match, e.g. 'work' == 'work'
+ fs_name = ".capt_#{basename}.ext3" # e.g. .capt_work.ext3, see also in cluster_task.rb
+ next unless File.file?(fs_name)
+ self.addlog("Cleaning up EXT3 capture filesystem '#{fs_name}' in work directory")
+ File.delete(fs_name) rescue nil
+ end
+
true
end
diff --git a/BrainPortal/lib/boutiques_save_std_out_std_err.rb b/BrainPortal/lib/boutiques_save_std_out_std_err.rb
new file mode 100644
index 000000000..83933ba68
--- /dev/null
+++ b/BrainPortal/lib/boutiques_save_std_out_std_err.rb
@@ -0,0 +1,152 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This module allows saving the stdout and stderr files of a Boutiques task.
+#
+# To use this module, you need to add the following lines in the descriptor:
+# "cbrain:integrator_modules": {
+# "BoutiquesSaveStdOutStdErr": {
+# "stdout_output_dir": "",
+# "stderr_output_dir": "path/to/dir"
+# }
+# }
+#
+# In the case of a MultilevelSshDataProvider, the "path/to/dir" will be used to save the output.
+# If the DataProvider is not a MultilevelSshDataProvider, the "path/to/dir" will be ignored.
+#
+# The values of the keys "stdout_output_dir" and "stderr_output_dir" can be set to an empty string;
+# in this situation the files will be saved directly in the root folder of the DataProvider.
+#
+# The output files will be saved as LogFiles named: <pretty_type>-<bname_tid_dashed>.std(out|err)
+#
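+# For example (hypothetical configuration), with "stdout_output_dir" set to
+# "logs" on a browse-path-capable DataProvider, the captured stdout would be
+# saved as a LogFile named "<pretty_type>-<bname_tid_dashed>.stdout" under
+# the "logs" browse path, and registered as a child of the task's first
+# input file.
+#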
+module BoutiquesSaveStdOutStdErr
+
+ # Note: to access the revision info of the module,
+ # you need to access the constant directly, the
+ # object method revision_info() won't work.
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # This method overrides the one in BoutiquesClusterTask.
+ # Save the stdout and stderr files of the task as output files.
+ # The files will be saved as children of the first input file.
+ def save_results
+ # Get the folder where to save the log files from the descriptor
+ descriptor = self.descriptor_for_save_results
+ module_info = descriptor.custom_module_info('BoutiquesSaveStdOutStdErr')
+
+ # Get parent file to set stderr and stdout as children of first input file
+ main_input_id = descriptor.file_inputs.first.id
+ file_id = self.invoke_params[main_input_id]
+ parent_file = Userfile.find(file_id)
+
+ # Save stdout
+ science_stdout_basename = science_stdout_basename(self.run_number)
+ save_stdout_basename = (Pathname.new(module_info["stdout_output_dir"]) +
+ "#{self.pretty_type}-#{self.bname_tid_dashed}.stdout").to_s
+ stdout_file = save_log_file(science_stdout_basename, save_stdout_basename, parent_file)
+ self.params["_cbrain_output_cbrain_stdout"] = [stdout_file.id] if stdout_file
+
+ # Save stderr
+ science_stderr_basename = science_stderr_basename(self.run_number)
+ save_stderr_basename = (Pathname.new(module_info["stderr_output_dir"]) +
+ "#{self.pretty_type}-#{self.bname_tid_dashed}.stderr").to_s
+ stderr_file = save_log_file(science_stderr_basename, save_stderr_basename, parent_file)
+ self.params["_cbrain_output_cbrain_stderr"] = [stderr_file.id] if stderr_file
+
+ self.save
+
+ super
+ end
+
+ # Add the stdout and stderr files to the descriptor
+ # for the show page of the task.
+ def descriptor_for_show_params #:nodoc:
+ descriptor = super.dup
+
+ stdout_file = BoutiquesSupport::OutputFile.new({
+ "id" => "cbrain_stdout",
+ "name" => "Standard output",
+ "description" => "Standard output of the tool",
+ "optional" => true
+ })
+
+ stderr_file = BoutiquesSupport::OutputFile.new({
+ "id" => "cbrain_stderr",
+ "name" => "Standard error",
+ "description" => "Standard error of the tool",
+ "optional" => true
+ })
+
+ descriptor["output-files"] << stdout_file if !descriptor.output_files.any? { |f| f.id == "cbrain_stdout" }
+ descriptor["output-files"] << stderr_file if !descriptor.output_files.any? { |f| f.id == "cbrain_stderr" }
+
+ descriptor
+ end
+
+ private
+
+ # Returns a Userfile object, prepared with a browse_path if necessary.
+ # To do that it can override the attlist to add a browse_path and then
+ # call the standard safe_userfile_find_or_new() method.
+ def safe_logfile_find_or_new(klass, attlist)
+ name = attlist[:name]
+ return safe_userfile_find_or_new(klass, attlist) if ! (name.include? "/") # if there is no relative path, just do normal stuff
+
+ # Find all the info we need
+ attlist = attlist.dup
+ dp_id = attlist[:data_provider_id] || self.results_data_provider_id
+ dp = DataProvider.find(dp_id)
+ pn = Pathname.new(name) # "a/b/c/hello.txt"
+
+ # Make adjustments to name and browse_path
+ attlist[:name] = pn.basename.to_s # "hello.txt"
+ if dp.has_browse_path_capabilities?
+ attlist[:browse_path] = pn.dirname.to_s # "a/b/c"
+ self.addlog "BoutiquesSaveStdErrOut: result DataProvider browse_path for Stderr and Stdout will be '#{pn.dirname}'"
+ else
+ attlist[:browse_path] = nil # ignore the browse_path
+ self.addlog "BoutiquesSaveStdErrOut: result DataProvider doesn't have multi-level capabilities, ignoring forced browse_path for Stderr and Stdout '#{pn.dirname}'."
+ end
+
+ # Invoke the standard code
+ return safe_userfile_find_or_new(klass, attlist)
+ end
+
+ # Save the log with original_file_path to filename as
+ # a child of parent_file on the results data provider.
+ def save_log_file(original_file_path, filename, parent_file) #:nodoc:
+ self.addlog("Saving log file #{filename}")
+ file = safe_logfile_find_or_new(LogFile, :name => filename)
+
+ if ! file.save
+ self.addlog("Could not save back log file #{filename}")
+ return nil
+ end
+
+ file.cache_copy_from_local_file(original_file_path)
+ file.move_to_child_of(parent_file)
+ self.addlog("Saved log file #{filename}")
+
+ file
+ end
+end
+
diff --git a/BrainPortal/lib/boutiques_support.rb b/BrainPortal/lib/boutiques_support.rb
index 670634031..2b51c5426 100644
--- a/BrainPortal/lib/boutiques_support.rb
+++ b/BrainPortal/lib/boutiques_support.rb
@@ -60,6 +60,13 @@ def self.validate(json)
Group = Class.new(RestrictedHash) { allowed_keys group_prop_names }
ContainerImage = Class.new(RestrictedHash) { allowed_keys cont_prop_names }
+ # Adds a comparison operator to these subobjects so that
+ # they can be sorted.
+ # See also Hash#resorted in the CBRAIN core extensions.
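+ # For instance (hypothetical objects), Input.new("id" => "a") <=> Input.new("id" => "b")
+ # returns -1, so arrays of these subobjects sort by their id.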
+ [ Input, OutputFile, Group ].each do |klass|
+ klass.send(:define_method, :'<=>') { |other| self.id <=> other.id }
+ end
+
def initialize(hash={})
super(hash)
# The following re-assignment transforms hashed into subobjects (like OutputFile etc)
@@ -68,7 +75,7 @@ def initialize(hash={})
self.output_files = self.output_files || []
self.groups = self.groups || []
self.custom = self.custom || {}
- self.container_image &&= self.container_image # we need to to remain nil if already nil
+ self.container_image &&= self.container_image # we need it to remain nil if already nil
self
end
@@ -203,6 +210,7 @@ def build_substitutions_by_tokens_hash(invoke_structure)
self.inputs.map do |input|
next nil if input.value_key.blank?
value = invoke_structure[input.id]
+ value = input.default_value if value.nil?
next nil if value.nil?
[ input.value_key, value ]
end.compact.to_h
@@ -227,6 +235,172 @@ def apply_substitutions(string, substitutions_by_tokens, to_strip=[])
newstring
end
+ PRETTY_ORDER_TOP = %w(
+ name
+ tool-version
+ author
+ description
+ url
+ descriptor-url
+ online-platform-urls
+ doi
+ tool-doi
+ shell
+ command-line
+ schema-version
+ container-image
+ inputs
+ groups
+ output-files
+ error-codes
+ suggested-resources
+ tags
+ tests
+ custom
+ )
+ PRETTY_ORDER_INPUT = %w(
+ id
+ name
+ description
+ type
+ optional
+ integer
+ minimum
+ exclusive-minimum
+ maximum
+ exclusive-maximum
+ list
+ list-separator
+ min-list-entries
+ max-list-entries
+ default-value
+ command-line-flag
+ command-line-flag-separator
+ value-key
+ value-choices
+ value-disables
+ disables-inputs
+ requires-inputs
+ )
+ PRETTY_ORDER_OUTPUT = %w(
+ id
+ name
+ description
+ optional
+ list
+ command-line-flag
+ value-key
+ path-template
+ path-template-stripped-extensions
+ )
+ PRETTY_ORDER_GROUP = %w(
+ id
+ name
+ description
+ all-or-none
+ one-is-required
+ members
+ )
+
+ # Returns a dup() of the current descriptor, but with
+ # the fields re-ordered so as to create a 'pretty'
+ # layout when printed out (as JSON, YAML etc).
+ #
+ # The order puts things like the name, description, command
+ # version number etc. near the top, then the inputs, the
+ # groups, the outputs, and the custom sections.
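+ # A minimal usage sketch (hypothetical file name; new_from_file is the
+ # descriptor loader used elsewhere in CBRAIN):
+ #
+ #   desc = BoutiquesSupport::BoutiquesDescriptor.new_from_file("tool.json")
+ #   puts JSON.pretty_generate(desc.pretty_ordered)
+ #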
+ def pretty_ordered
+ ordered = Hash.new # we use a plain hash to hold the newly ordered elems.
+ selfcopy = self.dup
+ PRETTY_ORDER_TOP.each { |k| ordered[k] = selfcopy.delete(k).dup if selfcopy.has_key?(k) }
+ selfcopy.each { |k,v| puts "Top miss: #{k}" ; ordered[k] = v.dup }
+ final = self.class.new(ordered)
+
+ # Order fields in each input
+ final.inputs = final.inputs.map do |input|
+ ordered = Hash.new
+ selfcopy = input.dup
+ PRETTY_ORDER_INPUT.each { |k| ordered[k] = selfcopy.delete(k).dup if selfcopy.has_key?(k) }
+ selfcopy.each { |k,v| puts "Inp miss: #{k}" ; ordered[k] = v.dup }
+ input.class.new(ordered)
+ end
+
+ # Order fields in each output-file
+ final.output_files = final.output_files.map do |output|
+ ordered = Hash.new
+ selfcopy = output.dup
+ PRETTY_ORDER_OUTPUT.each { |k| ordered[k] = selfcopy.delete(k).dup if selfcopy.has_key?(k) }
+ selfcopy.each { |k,v| puts "Out miss: #{k}" ; ordered[k] = v.dup }
+ output.class.new(ordered)
+ end
+
+ # Order fields in each group
+ final.groups = final.groups.map do |group|
+ ordered = Hash.new
+ selfcopy = group.dup
+ PRETTY_ORDER_GROUP.each { |k| ordered[k] = selfcopy.delete(k).dup if selfcopy.has_key?(k) }
+ selfcopy.each { |k,v| puts "Group miss: #{k}" ; ordered[k] = v.dup }
+ group.class.new(ordered)
+ end
+
+ final
+ end
+
+ # Returns a JSON text version of the descriptor but with
+ # the fields aligned with pretty whitespaces, e.g.
+ # instead of
+ #
+ # "name": "megatool",
+ # "tool-version": "3.14.15926",
+ # "url": "https://example.com",
+ #
+ # we get
+ #
+ # "name": "megatool",
+ # "tool-version": "3.14.15926",
+ # "url": "https://example.com",
+ def super_pretty_json
+
+ # Internally, the alignment is made by padding property names with '|'
+ # and then stripping them out of the normal JSON generated.
+ pad_keys = ->(hash,length) do
+ hash.transform_keys! { |k| k.to_s.size >= length ? k : k + ('|' * (length-k.size) ) }
+ end
+ maxkeylength = ->(hash) { hash.keys.map(&:to_s).map(&:size).max }
+
+ # Returns a modified hash with keys all padded with '|'
+ max_pad_keys = ->(hash) do
+ copy = HashWithIndifferentAccess.new.merge(hash.dup)
+ max = maxkeylength.(copy)
+ pad_keys.(copy,max)
+ copy
+ end
+
+ final = HashWithIndifferentAccess.new.merge(self.dup)
+
+ final['inputs'].map! { |input| max_pad_keys.(input) }
+ final['output-files'].map! { |output| max_pad_keys.(output) } if final['output-files'].present?
+ final['groups'].map! { |group| max_pad_keys.(group) } if final['groups'].present?
+ final.delete('groups') if final['groups'].blank?
+
+ final['container-image'] &&= max_pad_keys.(final['container-image'])
+ final['custom'] &&= max_pad_keys.(final['custom'])
+
+ final = max_pad_keys.(final)
+
+ json_with_bars = JSON.pretty_generate(final)
+ new_json = json_with_bars
+ .gsub( /\|+": / ) do |bars|
+ spaces = bars.size - 3; '": ' + (' ' * spaces)
+ end
+
+ new_json
+ end
+
+ #------------------------------------------------------
+ # Additional methods for the sub-objects of a descriptor
+ #------------------------------------------------------
+
class Input
# This method returns the parameter name for the input.
diff --git a/BrainPortal/lib/boutiques_task_logs_copier.rb b/BrainPortal/lib/boutiques_task_logs_copier.rb
new file mode 100644
index 000000000..bc2504c35
--- /dev/null
+++ b/BrainPortal/lib/boutiques_task_logs_copier.rb
@@ -0,0 +1,187 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# This module implements a special step that is executed BEFORE the standard
+# post-processing code of CBRAIN is triggered. A Boutiques descriptor can
+# include it in the custom section like this:
+#
+# "custom": {
+# "cbrain:integrator_modules": {
+# "BoutiquesTaskLogsCopier": {
+# "stdout": "local/path/[PARAM1]/ses*/basename[PARAM2]_{taskid}_stdout.log",
+# "stderr": "local/path/[PARAM1]/ses*/basename[PARAM2]_{taskid}_stderr.log",
+# "runtime": "blah/blah/runtime.kv",
+# "descriptor": "blah/blah/descriptor.json",
+# "invoke": "blah/blah/params.json",
+# "jobscript": "blah/blah/cbrain_script.sh"
+# }
+# }
+# }
+#
+# The module's behavior is to copy some CBRAIN-specific files (e.g. the STDOUT and STDERR
+# capture files of the task) and install them in some subdirectory that (normally)
+# will be saved as an output. It can also copy other useful configuration files,
+# as shown in the example above.
+#
+# The copy code will get triggered before CBRAIN runs its normal post-processing
+# code, so before it is aware whether or not the task completed successfully,
+# or failed.
+#
+# Configuration errors in the paths will raise a fatal exception. A missing
+# output directory path, however, will only generate a warning within
+# the task's processing logs.
+#
+# The pathname patterns provided can include standard filesystem glob elements
+# and Boutiques value-key parameters. The module will try to make sure that
+# only one subdirectory path matches the parent location specified by the path, though
+# it will attempt to create the last component of the parent if necessary.
+#
+# Several examples of what is supported:
+#
+# # Direct path:
+# "abc/def/stdout.log"
+#
+# # Paths with value-keys taken from Boutiques parameters:
+# "[OUTPUT_DIR]/[INPUT_FILE].stdout"
+# "work/[SUBJECT_ID]/logs/stdout_[SUBJECT_ID].log"
+#
+# # Path with a value-key AND a glob to find a subdirectory ses-N :
+# "work/[SUBJECT_ID]/ses-*/logs/stdout_[SUBJECT_ID].log"
+#
+# When trying to find the final path for the copied file, the parent dir
+# is initially globbed(), and if a single directory is returned,
+# it will be used. If none are found, the parent of THAT
+# is checked, and if it exists, the missing last component directory
+# will be created. E.g. for the last example above, if
+# "work/sub-1234/ses-2" exists but "work/sub-1234/ses-2/logs" doesn't
+# exist, the "logs" subdirectory will be created.
+module BoutiquesTaskLogsCopier
+
+ # Note: to access the revision info of the module,
+ # you need to access the constant directly, the
+ # object method revision_info() won't work.
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+ # This method overrides the one in BoutiquesClusterTask.
+ # It will attempt to copy the stdout and stderr files
+ # that CBRAIN captured, and then invoke the normal
+ # post processing code.
+ def save_results
+
+ # Get the cleaning paths patterns from the descriptor
+ descriptor = self.descriptor_for_save_results
+ destpaths = descriptor.custom_module_info('BoutiquesTaskLogsCopier')
+
+ # Copy STDOUT and STDERR, if possible
+ install_std_log_file(science_stdout_basename, destpaths[:stdout], "stdout")
+ install_std_log_file(science_stderr_basename, destpaths[:stderr], "stderr")
+
+ # Copy Boutiques configuration files
+ install_std_log_file(boutiques_json_basename, destpaths[:descriptor], "boutiques descriptor")
+ install_std_log_file(invoke_json_basename, destpaths[:invoke], "boutiques parameters")
+
+ # Copy Runtime info file
+ install_std_log_file(runtime_info_basename, destpaths[:runtime], "runtime info")
+
+ # Copy sbatch/qsub script
+ install_std_log_file(science_script_basename, destpaths[:jobscript], "jobscript")
+
+ # Performs standard processing
+ super
+ end
+
+ # Try to install a file +stdlogfile+ into the destination path
+ # specified by +destpath+ . destpath can be a pattern
+ # with glob components and Boutiques parameter value-keys, and
+ # must be at least one level deep.
+ #
+ # See the examples at the top of the module.
+ def install_std_log_file(stdlogfile, destpath, typeinfo)
+
+ # If we have not configured a capture path, do nothing.
+ return if destpath.blank?
+
+ # If for some reason the task's work directory doesn't have
+ # the required file, ignore it too.
+ return if ! File.file?(stdlogfile)
+
+ descriptor = self.descriptor_for_save_results
+
+ # Prepare the substitution hash and apply it
+ substitutions_by_token = descriptor.build_substitutions_by_tokens_hash(
+ JSON.parse(File.read(self.invoke_json_basename))
+ )
+ destpath = descriptor.apply_substitutions(destpath, substitutions_by_token)
+ destpath = Pathname.new(destpath).cleanpath
+
+ # Extract the prefix subdirectory paths (which can be globbed) and the basename
+ prefixglob = destpath.parent
+ basename = destpath.basename
+
+ # Sanity checks. These errors should never happen because the paths
+ # and patterns are normally configured by the administrator, who
+ # should know better than to misconfigure the module or
+ # point at paths outside the task's work directory.
+ cb_error "Misconfigured module BoutiquesTaskLogsCopier for #{typeinfo} with absolute path pattern '#{destpath}'" if destpath.absolute?
+ if prefixglob.to_s.blank? || prefixglob.to_s == '.'
+ cb_error "Misconfigured module BoutiquesTaskLogsCopier without a prefix subdirectory for #{typeinfo} '#{destpath}'"
+ end
+
+ # Try to find one and only one directory where to install the file.
+ dirglobs = Pathname.glob(prefixglob)
+
+ # If we get a pattern that matches several places, we can't do anything.
+ if dirglobs.size > 1
+ self.addlog "Warning: too many intermediate subdirectories match pattern '#{prefixglob}'; #{typeinfo} file not saved."
+ return
+ end
+
+ # If we can't find a match at all, maybe we can find a match with just the
+ # parent directory and we can create the final component.
+ if dirglobs.empty?
+ parent_of_prefix_glob = prefixglob.parent
+ parent_of_prefix_dirs = Pathname.glob(parent_of_prefix_glob)
+ if parent_of_prefix_dirs.size != 1
+ self.addlog "Warning: cannot find intermediate subdirectories matching pattern '#{prefixglob}'; #{typeinfo} file not saved."
+ return
+ end
+ mkdir_path = (Pathname.new(parent_of_prefix_dirs.first) + prefixglob.basename).to_s
+ Dir.mkdir(mkdir_path)
+ dirglobs = [ mkdir_path ]
+ end
+
+ destdir = dirglobs.first
+ if ! path_is_in_workdir?(destdir)
+ self.addlog "Misconfigured module BoutiquesTaskLogsCopier: path pattern '#{destpath}' is outside of the task's workdirectory; #{typeinfo} file not saved."
+ return
+ end
+
+ self.addlog "Copying #{typeinfo} file to '#{destdir}/#{basename}'"
+ FileUtils.copy_file(stdlogfile, "#{destdir}/#{basename}")
+
+ end
+
+end
+
+
+
+
diff --git a/BrainPortal/lib/cbrain_extensions/hash_extensions/conversions.rb b/BrainPortal/lib/cbrain_extensions/hash_extensions/conversions.rb
index 93c7db853..0292f75ff 100644
--- a/BrainPortal/lib/cbrain_extensions/hash_extensions/conversions.rb
+++ b/BrainPortal/lib/cbrain_extensions/hash_extensions/conversions.rb
@@ -81,6 +81,25 @@ def to_api_xml(options = {})
to_xml({ :dasherize => false, :root => root_tag }.merge(options))
end
+ # Returns a dup of the hash, where the keys are sorted, and
+ # any values that are arrays are also sorted. Applies these
+ # rules recursively. Assumes that all keys and all array values
+ # are things that can be compared, otherwise this will crash.
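+ # A minimal sketch of the effect (hypothetical data):
+ #
+ #   { "b" => [3, 1], "a" => { "d" => 2, "c" => 1 } }.resorted
+ #   # => { "a" => { "c" => 1, "d" => 2 }, "b" => [1, 3] }
+ #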
+ def resorted
+ res = self.class.new
+ self.keys.sort.each do |key|
+ val = self[key]
+ if val.is_a?(Hash)
+ res[key] = val.resorted
+ elsif val.is_a?(Array)
+ res[key] = val.sort.map { |x| x.respond_to?(:resorted) ? x.resorted : x }
+ else
+ res[key] = val
+ end
+ end
+ res
+ end
+
end
end
end
diff --git a/BrainPortal/lib/cbrain_task_generators/schema_task_generator.rb b/BrainPortal/lib/cbrain_task_generators/schema_task_generator.rb
index df9e80abc..48edeed38 100644
--- a/BrainPortal/lib/cbrain_task_generators/schema_task_generator.rb
+++ b/BrainPortal/lib/cbrain_task_generators/schema_task_generator.rb
@@ -261,9 +261,10 @@ def register(task)
container_engine.capitalize!
return if container_engine == "Singularity" && !resource.singularity_present?
return if container_engine == "Docker" && (!resource.docker_present? && !resource.singularity_present?)
-
+
# If Docker engine isn't present use Singularity
container_engine = "Singularity" if (container_engine == "Docker" && !resource.docker_present?)
+ container_index = 'docker://' if container_index == 'index.docker.io' # old convention
ToolConfig.new(
:tool_id => task.tool.id,
diff --git a/BrainPortal/app/views/service/info.html.erb b/BrainPortal/lib/data_provider_test_connection_error.rb
similarity index 57%
rename from BrainPortal/app/views/service/info.html.erb
rename to BrainPortal/lib/data_provider_test_connection_error.rb
index 545527fb5..7d2b8fefd 100644
--- a/BrainPortal/app/views/service/info.html.erb
+++ b/BrainPortal/lib/data_provider_test_connection_error.rb
@@ -1,9 +1,8 @@
-<%-
#
# CBRAIN Project
#
-# Copyright (C) 2008-2012
+# Copyright (C) 2008-2023
# The Royal Institution for the Advancement of Learning
# McGill University
#
@@ -20,18 +19,13 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
--%>
-<% title 'Service Info' %>
+# This class provides an exception class for
+# representing a user key connection error.
+# At the moment, however, it does not guarantee that the connection is good.
+class DataProviderTestConnectionError < CbrainError
+
+ Revision_info=CbrainFileRevision[__FILE__] #:nodoc:
+
+end
-[HTML table markup lost in extraction; the deleted ERB view rendered the fields below]
-CBRAIN Web Service Information
-Basic Information
-Name: <%= @info[:name] %>
-Version: <%= @info[:version] %>
-Synopsis: <%= @info[:synopsis] %>
-Release Time: <%= @info[:releaseTime] %>
-Research Subject: <%= @info[:researchSubject] %>
-Support Email: <%= @info[:supportEmail] %>
-Tags: <%= @info[:tags].join(", ") %>
diff --git a/BrainPortal/lib/models_report.rb b/BrainPortal/lib/models_report.rb
index 66894aca9..533298c52 100644
--- a/BrainPortal/lib/models_report.rb
+++ b/BrainPortal/lib/models_report.rb
@@ -154,7 +154,7 @@ def self.search_for_token(token, user=current_user) #:nodoc:
is_numeric = token =~ /\A\d+\z/ || token == "-9998877" # ... because we'll find by ID
- file_scope = Userfile .find_all_accessible_by_user(user) .order(:name)
+ file_scope = Userfile .find_all_accessible_by_user(user, :access_requested => :read).order(:name)
task_scope = CbrainTask .find_all_accessible_by_user(user) .order(:id)
rr_scope = RemoteResource.find_all_accessible_by_user(user) .order(:name)
dp_scope = DataProvider .find_all_accessible_by_user(user) .order(:name)
diff --git a/BrainPortal/lib/portal_sanity_checks.rb b/BrainPortal/lib/portal_sanity_checks.rb
index def73adf4..6d9f40179 100644
--- a/BrainPortal/lib/portal_sanity_checks.rb
+++ b/BrainPortal/lib/portal_sanity_checks.rb
@@ -373,5 +373,23 @@ def self.ensure_scratch_data_provider_exists #:nodoc:
scratch.meta['no_viewers'] = 'on' # files can't be viewed in interface
end
+ def self.ensure_expired_messages_are_purged #:nodoc:
+
+ #-----------------------------------------------------------------------------
+ puts "C> Ensuring expired Messages are purged"
+ #-----------------------------------------------------------------------------
+
+ todel = Message.all.to_a
+ .select { |m| m.expiry.present? }
+ .select { |m| m.expiry < Time.now }
+ if todel.size > 0
+ puts "C> \t - There are #{todel.size} messages to delete."
+ todel.each { |m| m.destroy }
+ else
+ puts "C> \t - There are no messages to delete."
+ end
+
+ end
+
end
diff --git a/BrainPortal/lib/portal_system_checks.rb b/BrainPortal/lib/portal_system_checks.rb
index f8ac0757c..6d3466baa 100644
--- a/BrainPortal/lib/portal_system_checks.rb
+++ b/BrainPortal/lib/portal_system_checks.rb
@@ -119,11 +119,11 @@ def self.z000_ensure_we_have_a_local_ssh_agent #:nodoc:
puts "C> Making sure we have a CBRAIN key for the agent..."
#----------------------------------------------------------------------------
- cbrain_identity_file = "#{CBRAIN::Rails_UserHome}/.ssh/id_cbrain_portal"
+ cbrain_identity_file = "#{CBRAIN::Rails_UserHome}/.ssh/id_cbrain_ed25519"
if ! File.exists?(cbrain_identity_file)
puts "C> \t- Creating identity file '#{cbrain_identity_file}'."
with_modified_env('SSH_ASKPASS' => '/bin/true', 'DISPLAY' => 'none:0.0') do
- system("/bin/bash","-c","ssh-keygen -t rsa -f #{cbrain_identity_file.bash_escape} -C 'CBRAIN_Portal_Key' /dev/null 2>/dev/null")
+ system("/bin/bash","-c","ssh-keygen -t ed25519 -f #{cbrain_identity_file.bash_escape} -C 'CBRAIN_Portal_Key' /dev/null 2>/dev/null")
end
end
@@ -132,8 +132,8 @@ def self.z000_ensure_we_have_a_local_ssh_agent #:nodoc:
else
CBRAIN.with_unlocked_agent
curkeys=agent.list_keys
- if curkeys.size > 0
- puts "C> \t- Identity already present in agent: #{curkeys[0]}"
+ if digest = curkeys.detect { |string| string.to_s =~ /\(ED25519\)/i } # = not ==
+ puts "C> \t- Identity already present in agent: #{digest}"
else
ok = with_modified_env('SSH_ASKPASS' => '/bin/true', 'DISPLAY' => 'none:0.0') do
agent.add_key_file(cbrain_identity_file) rescue nil # will raise exception if anything wrong
@@ -146,6 +146,20 @@ def self.z000_ensure_we_have_a_local_ssh_agent #:nodoc:
end
end
end
+
+ # Add old key if it exists.
+ old_identity_file = "#{CBRAIN::Rails_UserHome}/.ssh/id_cbrain_portal"
+ if File.exists?(old_identity_file)
+ ok_old = with_modified_env('SSH_ASKPASS' => '/bin/true', 'DISPLAY' => 'none:0.0') do
+ agent.add_key_file(old_identity_file) rescue nil
+ end
+ if ok_old
+ puts "C> \t- Added OLD identity to agent from file: '#{old_identity_file}'."
+ else
+ puts "C> \t- WARNING: cannot add OLD identity from file: '#{old_identity_file}'."
+ end
+ end
+
end
diff --git a/BrainPortal/lib/ssh_key.rb b/BrainPortal/lib/ssh_key.rb
index 9f996d348..3024072ed 100644
--- a/BrainPortal/lib/ssh_key.rb
+++ b/BrainPortal/lib/ssh_key.rb
@@ -39,7 +39,7 @@ class SshKey
CONFIG = { #:nodoc:
:ssh_keys_dir => (Rails.root rescue nil) ? "#{Rails.root.to_s}/user_keys" : "/not/yet/configured",
:exec_ssh_keygen => `bash -c "type -p ssh-keygen"`.strip,
- :ssh_keygen_type => "rsa",
+ :ssh_keygen_type => "ed25519",
:debug => false,
}
@@ -124,7 +124,7 @@ def validate_files!
raise RuntimeError.new("Public file for SSH Key '#{@name}' does not exist.") unless
File.exists?(pub_path) && File.size(pub_path) > 50
raise RuntimeError.new("Private file for SSH Key '#{@name}' does not exist.") unless
- File.exists?(priv_path) && File.size(priv_path) > 1000
+ File.exists?(priv_path) && File.size(priv_path) > 300
true
end
@@ -211,7 +211,7 @@ def private_key_path
# Returns the private key (in SSH format)
def private_key(i_know_what_i_am_doing = false)
- raise RuntimeError("Private key access denied") unless i_know_what_i_am_doing == 'I Know What I Am Doing'
+ raise RuntimeError.new("Private key access denied") unless i_know_what_i_am_doing == 'I Know What I Am Doing'
File.read(private_key_path)
end
diff --git a/BrainPortal/lib/tasks/boutiques_rewrite.rake b/BrainPortal/lib/tasks/boutiques_rewrite.rake
new file mode 100644
index 000000000..1013840d3
--- /dev/null
+++ b/BrainPortal/lib/tasks/boutiques_rewrite.rake
@@ -0,0 +1,98 @@
+
+#
+# CBRAIN Project
+#
+# Copyright (C) 2008-2023
+# The Royal Institution for the Advancement of Learning
+# McGill University
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+namespace :cbrain do
+ namespace :boutiques do
+ desc "Reads a Boutiques descriptor and writes it back with adjustments"
+
+ task :rewrite, [:action] => :environment do |t,args|
+
+ args.with_defaults(:action => 'reorder')
+ action = args.action
+ raise "This task's action must be 'reorder' (default), or 'pad' or 'pad+reorder'" unless
+ action.match /\A(reorder|pad|pad\+reorder)\z/
+
+ # There is no good way to provide standard command line
+ # args to a rake task, so I have to butcher ARGV myself.
+ args = ARGV.size > 1 ? ARGV[1..ARGV.size-1] : [] # remove 'rake'
+ while args.size > 0 && args[0] =~ /^cbrain:boutiques|^-/ # remove options and task name
+ args.shift
+ end
+
+ # Usage
+ if args.size != 1
+ puts <<-USAGE
+ Usage:
+ rake cbrain:boutiques:rewrite boutiques.json
+ rake cbrain:boutiques:rewrite[reorder] boutiques.json # default
+ rake cbrain:boutiques:rewrite[pad] boutiques.json
+ rake cbrain:boutiques:rewrite[pad+reorder] boutiques.json
+
+ This task will read the content of 'boutiques.json' and
+ write back 'new_boutiques.json'.
+
+ The single option is a keyword that determines which rewriting
+ procedure to perform.
+
+ With 'reorder' (the default), the properties are reordered
+ in a pretty way.
+
+ With 'pad', the JSON produced will contain extra spaces to
+ align all the values together.
+
+ Unfortunately, because of the way rake tasks work, the
+ full path to 'boutiques.json' must be provided, or a
+ path relative to CBRAIN's BrainPortal directory.
+
+ USAGE
+ exit 1
+ end
+
+ filename = args.shift
+ pathname = Pathname.new(filename)
+ newfile = pathname.dirname + "new_#{pathname.basename}"
+
+ puts "Reading file #{filename}..."
+ btq = BoutiquesSupport::BoutiquesDescriptor.new_from_file(filename)
+
+ if action =~ /reorder/
+ puts "Re-ordering..."
+ btq = btq.pretty_ordered
+ btq.delete('groups') if btq.groups.blank? # stupid btq spec says it must be completely absent
+ json = JSON.pretty_generate(btq)
+ end
+
+ if action =~ /pad/
+ puts "Padding values..."
+ btq.delete('groups') if btq.groups.blank? # stupid btq spec says it must be completely absent
+ json = btq.super_pretty_json
+ end
+
+ puts "Saving #{newfile}..."
+ File.open(newfile.to_s,"w") { |fh| fh.write json }
+
+ puts "Done."
+
+ end
+ end
+end
+
diff --git a/BrainPortal/lib/tasks/cbrain_nagios_checker.rake b/BrainPortal/lib/tasks/cbrain_nagios_checker.rake
index 55285a5b8..37a68833b 100644
--- a/BrainPortal/lib/tasks/cbrain_nagios_checker.rake
+++ b/BrainPortal/lib/tasks/cbrain_nagios_checker.rake
@@ -43,6 +43,7 @@ namespace :cbrain do
task :dps => :environment do
CbrainSystemChecks.check([:a002_ensure_Rails_can_find_itself])
+ PortalSystemChecks.check([:z000_ensure_we_have_a_local_ssh_agent])
# Restores STDOUT and STDERR so that nagios
# can capture our pretty message at the end.
diff --git a/BrainPortal/lib/tasks/resource_usage_serialization.rake b/BrainPortal/lib/tasks/resource_usage_serialization.rake
index 9f28673a2..8f29ec069 100644
--- a/BrainPortal/lib/tasks/resource_usage_serialization.rake
+++ b/BrainPortal/lib/tasks/resource_usage_serialization.rake
@@ -212,6 +212,9 @@ namespace :cbrain do
# Main processing loop for all classes
klass_names.each do |klass_name|
+ puts "\n-------------------------------------------------------"
+ puts "Reloading ResourceUsage records for class #{klass_name}"
+
# Find all files for klass_name
globpattern = Rails.root + "data_dumps" + "#{klass_name}.*.yaml*" # matches .gz too
files = Dir.glob(globpattern)
diff --git a/BrainPortal/lib/view_helpers.rb b/BrainPortal/lib/view_helpers.rb
index 9ad850b66..fbf791d1e 100644
--- a/BrainPortal/lib/view_helpers.rb
+++ b/BrainPortal/lib/view_helpers.rb
@@ -99,8 +99,6 @@ def pretty_elapsed(numseconds,options = {})
]
components = components.select { |c| c[0] > 0 }
- components.pop while components.size > 0 && components[-1] == 0
- components.shift while components.size > 0 && components[0] == 0
if options[:num_components]
while components.size > options[:num_components]
diff --git a/BrainPortal/public/401.html b/BrainPortal/public/401.html
index 58e70975f..84d633712 100644
--- a/BrainPortal/public/401.html
+++ b/BrainPortal/public/401.html
@@ -3,63 +3,45 @@
[restyling hunk; HTML markup lost in extraction. Page text: title "You are not authorized to view this page (401)"; heading "You are not authorized to view this page."; message "Stop trying to be a hacker and get back to work."]
diff --git a/BrainPortal/public/404.html b/BrainPortal/public/404.html
index ab1bceb20..a5bd42279 100644
--- a/BrainPortal/public/404.html
+++ b/BrainPortal/public/404.html
@@ -3,63 +3,45 @@
[restyling hunk; HTML markup lost in extraction. Page text: title "The page you were looking for doesn't exist (404)"; heading "The page you were looking for doesn't exist."; message "You may have mistyped the address or the page may have moved."]
diff --git a/BrainPortal/public/422.html b/BrainPortal/public/422.html
index 7e30a2f0b..3ac3de251 100644
--- a/BrainPortal/public/422.html
+++ b/BrainPortal/public/422.html
@@ -3,63 +3,45 @@
[restyling hunk; HTML markup lost in extraction. Page text: title "The change you wanted was rejected (422)"; heading "The change you wanted was rejected."; message "Maybe you tried to change something you didn't have access to."]
diff --git a/BrainPortal/public/500.html b/BrainPortal/public/500.html
index a022b7da4..929fda37e 100644
--- a/BrainPortal/public/500.html
+++ b/BrainPortal/public/500.html
@@ -3,63 +3,45 @@
[restyling hunk; HTML markup lost in extraction. Page text: title "We're sorry, but something went wrong (500)"; heading "We're sorry, but something went wrong."; message "We've been notified about this issue and we'll take a look at it shortly."]
diff --git a/BrainPortal/public/502.html b/BrainPortal/public/502.html
index 0d4d1417c..d9575c2b9 100644
--- a/BrainPortal/public/502.html
+++ b/BrainPortal/public/502.html
@@ -3,65 +3,48 @@
[restyling hunk; HTML markup lost in extraction. Page text: title "We're sorry, this server is down for maintenance (502)"; heading "We're sorry, this server is down for maintenance."; messages "Please return in a few minutes." and "Maintenance periods usually last between 5 and 30 minutes."]