Commit 72ba3a04 authored by Oxana Smirnova's avatar Oxana Smirnova
Browse files

Merge branch 'next' of https://source.coderefinery.org/nordugrid/arc into next

parents f5d4d05b e8f1b35c
Pipeline #9727 passed with stage
in 90 minutes and 31 seconds
......@@ -13,16 +13,13 @@
* [ ] Check that the supported platforms are up to date here: https://download.nordugrid.org/repos-6.html and here: http://www.nordugrid.org/documents/arc6/common/repos/repository.html (@waananen)
* [ ] Add version to NorduGrid Bugzilla products (@waananen)
* [ ] Publish release notes on web (@oxana)
* [ ] Packages are ready to be pushed (@waananen)
* [ ] Announce release on nordugrid-discuss, wlcg-arc-ce-discuss and urt-discuss@mailman.egi.eu email lists (@maikenp)
* [ ] Update News about release: http://www.nordugrid.org/ (@oxana)
* [ ] Update link to latest release: http://www.nordugrid.org/arc/ (@oxana)
* [ ] Push packages to the repo (@waananen)
Important checks for developer
* Will your changes impact performance? Action: test and fix.
* Will your changes be backwards incompatible? Action: only merge into next branch - or bring up issue in a meeting/chat with release manager.
* ...
Important bugs/changes to clear before code-freeze:
* [ ] ...
......
......@@ -2433,8 +2433,6 @@ AC_CONFIG_FILES([Makefile
src/utils/python/arc/gen_paths_dist.sh
src/utils/python/arc/utils/Makefile
src/utils/python/arc/control/Makefile
src/utils/python/arc/ssm/Makefile
src/utils/python/arc/ssm/ssmsend
src/utils/hed/wsdl2hed.1
src/utils/hed/arcplugin.1
src/utils/hed/Makefile
......
......@@ -336,7 +336,7 @@ Depends:
nordugrid-arc-arcctl (= ${source:Version}),
${pydeps:Depends}
XB-Python-Version: ${python:Versions}
Description: ARC Resource-coupled EXecution service - Python LRMS backends
Description: ARC Resource-coupled EXecution service - Community RTEs framework
NorduGrid is a collaboration aiming at development, maintenance and
support of the middleware, known as the Advanced Resource
Connector (ARC).
......
......@@ -82,7 +82,7 @@ Files: src/services/a-rex/infoproviders/glite-info-provider-ldap
Copyright: Members of the EGEE Collaboration 2004
License: Apache-2.0
Files: src/utils/python/arc/ssm/*
Files: src/utils/python/arc/thirdparty/ssm/*
Copyright: 2012 STFC
License: Apache-2.0
......
......@@ -7,7 +7,6 @@ debian/tmp/usr/lib/arc/inputcheck
debian/tmp/usr/lib/arc/jura-ng
debian/tmp/usr/lib/arc/smtp-send
debian/tmp/usr/lib/arc/smtp-send.sh
debian/tmp/usr/lib/arc/ssmsend
debian/tmp/usr/share/arc/cancel-*-job
debian/tmp/usr/share/arc/scan-*-job
......@@ -70,8 +69,6 @@ debian/tmp/usr/share/man/man8/a-rex-backtrace-collect.8
debian/tmp/usr/share/man/man8/arc-blahp-logger.8
debian/tmp/usr/share/man/man8/gm-*.8
debian/tmp/usr/lib/python?.*/site-packages/arc/ssm
debian/tmp/usr/lib/python?.*/site-packages/arc/control/AccountingDB.py*
debian/tmp/usr/lib/python?.*/site-packages/arc/control/AccountingPublishing.py*
debian/tmp/usr/lib/python?.*/site-packages/arc/control/Accounting.py*
......
......@@ -215,7 +215,7 @@ ifeq ($(PYVER),2)
override_dh_python2:
dh_python2
echo "pydeps:Depends=python-ldap, python-isodate, python-stompy" \
echo "pydeps:Depends=python-ldap, python-isodate" \
>> debian/nordugrid-arc-arex.substvars
echo "pydeps:Depends=python-dns" \
>> debian/nordugrid-arc-community-rtes.substvars
......
......@@ -510,19 +510,11 @@ Requires: %{name}-arcctl = %{version}-%{release}
Requires: %{name}-arcctl-service = %{version}-%{release}
Requires: %{name}-plugins-needed = %{version}-%{release}
%if %{py3default}
Requires: python3-dirq
Requires: python3-isodate
Requires: python3-ldap
Requires: python3-stomppy
%else
Requires: python-dirq
Requires: python-isodate
Requires: python-ldap
%if %{?fedora}%{!?fedora:0} >= 26 || %{?rhel}%{!?rhel:0} >= 8
Requires: python2-stomppy
%else
Requires: stomppy
%endif
%endif
Provides: %{name}-cache-service = %{version}-%{release}
Obsoletes: %{name}-cache-service < 6.0.0
......@@ -1511,9 +1503,7 @@ service arc-infosys-ldap condrestart > /dev/null 2>&1 || :
%dir %{_localstatedir}/spool/arc
%dir %{_localstatedir}/spool/arc/ssm
%dir %{_localstatedir}/spool/arc/urs
%{_libexecdir}/%{pkgdir}/ssmsend
%if %{py3default}
%{python3_sitearch}/%{pkgdir}/ssm
%{python3_sitearch}/%{pkgdir}/control/AccountingDB.py
%{python3_sitearch}/%{pkgdir}/control/AccountingPublishing.py
%{python3_sitearch}/%{pkgdir}/control/Accounting.py
......@@ -1529,7 +1519,6 @@ service arc-infosys-ldap condrestart > /dev/null 2>&1 || :
%{python3_sitearch}/%{pkgdir}/control/__pycache__/Jobs.*
%{python3_sitearch}/%{pkgdir}/control/__pycache__/RunTimeEnvironment.*
%else
%{python2_sitearch}/%{pkgdir}/ssm
%{python2_sitearch}/%{pkgdir}/control/AccountingDB.py*
%{python2_sitearch}/%{pkgdir}/control/AccountingPublishing.py*
%{python2_sitearch}/%{pkgdir}/control/Accounting.py*
......
This diff is collapsed.
This diff is collapsed.
=================================
Advanced Resource Connector (ARC)
=================================
Release Notes for NorduGrid ARC 6.11.0 22.04.2021
=======================================================
We are happy to announce the release of ARC 6.11.0
Highlights in this release
--------------------------
This is a release mainly to push out a handful of smaller improvements and bug fixes.
There is however one longstanding "feature" which has been improved in this release.
Namely the queue selection when there is no queue infomation from the client side.
From now on, if no queue is requested/selected from the client side,
only queues configured in arc.conf will be among the possible queues to use.
This will avoid the surprising feature that the queue selection in this case is up to the batch system,
and not ARC for such clients. This is not an issue when the ARC client is used.
Note that this means that from now you must ensure that the authgroups you wish to allow
submission from, are authorized in the queue you intend the authgroup to submit to. Otherwise
the submission will be rejected.
For details, please see the documentation on the subject:
http://nordugrid.pages.coderefinery.org/doc/admins/details/auth_and_mapping.html#queue-selection-rules-and-queue-configuration
The following Bugzilla tickets have been addressed or solved
------------------------------------------------------------
* 4006 Select first matching queue from arc.conf when none is specified in job description
* 4002 Error parsing IPv4 address in EPSV response
* 3995 diag file not found
* 3994 Jobs don't move scratch files (OpenPBS-20.0)
* 3928 Wrong memory request for PBS back-end
Documentation
==============
The ARC 6 documentation can be found at http://www.nordugrid.org/arc/arc6
If you miss something or have questions, please contact us!
Installing ARC 6
=================
We recommend installing the ARC release using the NorduGrid repository.
* Start with the basics: http://www.nordugrid.org/arc/arc6/admins/try_arc6.html
* Get production ready: http://www.nordugrid.org/arc/arc6/admins/arc6_install_guide.html
Note that if you instead install from EPEL for RHEL 7 compatible systems,
the ARC 6 packages can be found as nordugrid-arc6-* and not nordugrid-arc-*
This was needed in order to supply both ARC 5 already available for RHEL7 and the new ARC 6.
We provide binary builds for all supported versions of
* Debian (i386 and amd64)
* Fedora (x86_64)
* Centos/RHEL (x86_64)
* Ubuntu (i386 and amd64, both LTS and interim releases) - from Ubuntu 20.04 no i386 support
In addition we build nightly packages for older platforms.
Future support of ARC 5-series
===============================
As of June 2020 no more security updates are provided to ARC 5.
Also since the release of ARC 6 in June 2019, no more development on the ARC 5 code base has been provided.
Production Sites already running ARC 5 will still get operational site support in form of
deployment and configuration troubleshooting help via GGUS until end June 2021.
However, we may recommend upgrading to ARC 6 depending on the issues the site is facing.
ARC5 is available in EPEL7 and will stay there. EPEL8 will only contain ARC 6.
To install ARC 6 from EPEL7 the package-names are of type: nordugrid-arc6-*
Getting in touch
==================
If you need help with the configuration file or have any other questions related to ARC 6, please contact us via
* Email: nordugrid-discuss@nordugrid.org or wlcg-arc-ce-discuss@cern.ch
* Skype channel dedicated to ARC 6 support: https://join.skype.com/dyf3A6Uutjy2
Known issues
==============
* In Centos8 the openldap server is not installable because slapd is not available in the RHEL8 release (Bugzilla 3944).
As a consequence, the LDAP-based server-side information system interface of the ARC-CE will not be available either.
If interaction with the ARC-CE relies on LDAP-based information schemas (glue1.3 or nordugrid schema),
then we recommend that you stay with Centos7 for the time being.
* Related to the new accounting system introduced in release 6.4.0:
* Accounting records created by ARC 6.4.0 are affected by a bug causing missing benchmark values:
https://bugzilla.nordugrid.org/show_bug.cgi?id=3921#c3
Recommendations are to update to a newer version of ARC.
The bug report gives instructions on how to manually fix the wrong benchmark values.
......@@ -43,6 +43,22 @@ We provide binary builds for all supported versions of
In addition we build nightly packages for older platforms.
Contributing
=============
You can submit pull requests to our public repo at https://github.com/nordugrid/arc which mirrors
our Gitlab repo https://source.coderefinery.org/nordugrid/arc.
The pull requests will be automatically copied over to our Gitlab repo where they will be
merged into the source-code if accepted.
Getting in touch
==================
If you need help with the configuration file or have any other questions related to ARC 6, please contact us via
* Email: nordugrid-discuss@nordugrid.org or wlcg-arc-ce-discuss@cern.ch
* Skype channel dedicated to ARC 6 support: https://join.skype.com/dyf3A6Uutjy2
Future support of ARC 5-series
===============================
As of June 2020 no more security updates are provided to ARC 5.
......@@ -56,11 +72,6 @@ ARC5 is available in EPEL7 and will stay there. EPEL8 will only contain ARC 6.
To install ARC 6 from EPEL7 the package-names are of type: nordugrid-arc6-*
Getting in touch
==================
If you need help with the configuration file or have any other questions related to ARC 6, please contact us via
* Email: nordugrid-discuss@nordugrid.org or wlcg-arc-ce-discuss@cern.ch
* Skype channel dedicated to ARC 6 support: https://join.skype.com/dyf3A6Uutjy2
Known issues
......
......@@ -112,11 +112,14 @@
## they are missing (or commented out). Configuration parameters with "undefined" defaults take
## no values. Furthermore, configuration options within disabled blocks take no values either.
##
## Configuration blocks related to authorization are ORDER-DEPENDENT! The authorization blocks
## [authgroup:name] MUST be defined before used in the blocks such as [mapping], [arex/ws/jobs] or
## [gridftp/jobs]. The same rule applies to defining legacy [userlist:name] blocks.
## Furthermore, the order of the authorization blocks itself may have influence over
## authorization decisions!
## Configuration blocks are ORDER-DEPENDENT. To be safe, please use the order as indicated in the list
## of blocks in this reference. This is especially important for configuration blocks related to
## authorization and mapping. The order dependency is also honoured within options inside a certain block.
##
## This means for instance that configuration blocks related to authorization MUST appear before used in
## the blocks such as [mapping], [arex/ws/jobs] or [gridftp/jobs]. Order dependency within a block is
## for instance important when it comes to authorization decisions, as the first matching rule is used.
## For more details see the specific block reference.
##
## Below we give a detailed description of all the configuration options of the
## different configuration blocks. Every configuration option is described
......@@ -1679,13 +1682,13 @@
#[arex/jura/apel:egi_prod_apel]
## *targeturl = url - The service endpoint URL of the APEL accounting server.
## Targets can be found using "arcctl accounting apel-brokers --ssl".
## default: undefined
#targeturl=https://mq.cro-ngi.hr:6162
#targeturl=https://msg.argo.grnet.gr
## topic = topic_name - Sets the name of the APEL topic to which JURA will publish the
## accounting records.
## default: /queue/global.accounting.cpu.central
## AMS destination topic for compute element is 'gLite-APEL'
## default: gLite-APEL
#topic=/queue/global.accounting.test.cpu.central
## *gocdb_name = name - Can be used to specify the GOCDB name of the resource.
......@@ -1695,12 +1698,11 @@
## apel_messages = type - (introduced in 6.4.0) Define what kind of records JURA
## will send to APEL services during regular publishing process.
## Possible cases are: per-job EMI CAR records ("urs"), APEL summary records ("summaries")
## or "both".
## Possible cases are: per-job EMI CAR records ("urs"), APEL summary records ("summaries").
## APEL Sync messages are always generated.
## NOTE that on heavily loaded sites with 10k+ jobs weekly "summaries" generation has
## a performance penalty in ARC < 6.8. It is advised to use "urs" in this case.
## allowedvalues: urs summaries both
## allowedvalues: urs summaries
## default: summaries
#apel_messages=urs
## CHANGE: NEW in 6.4.0.
......@@ -1715,8 +1717,9 @@
## urbatchsize = number - JURA sends usage records not one-by-one, but in batches.
## This options sets the size of a batch. Zero value means unlimited batch size.
## default: 1000
#urbatchsize=1000
## 500 is recommended to avoid too large messages using AMS
## default: 500
#urbatchsize=500
## urdelivery_frequency = seconds - (introduced in 6.4.0) Add optional minimal
## threshold of the interval between subsequent records publishing to this target.
......
......@@ -86,7 +86,7 @@ namespace Arc {
public:
JobStateProcessor(std::list<Job*>& jobs): jobs(jobs) {}
virtual void operator()(std::string const& id, XMLNode node) {
virtual void operator()(std::string const& id, XMLNode node, URL const& query_url) {
std::string job_id = node["id"];
std::string job_state = node["state"];
if(!job_state.empty() && !job_id.empty()) {
......@@ -97,11 +97,11 @@ namespace Arc {
if(job_id == id) {
(*itJob)->State = JobStateARCREST(job_state);
// (*itJob)->RestartState = ;
// (*itJob)->StageInDir = (std::string)aid["esainfo:StageInDirectory"];
// (*itJob)->StageOutDir = (std::string)aid["esainfo:StageInDirectory"];
// (*itJob)->SessionDir = (std::string)aid["esainfo:StageInDirectory"];
std::string baseUrl = query_url.ConnectionURL()+query_url.Path()+"/"+job_id;
(*itJob)->StageInDir = baseUrl;
(*itJob)->StageOutDir = baseUrl;
(*itJob)->SessionDir = baseUrl;
// (*itJob)->DelegationID.push_back ;
// (*itJob)->JobID = ;
break;
}
}
......@@ -112,14 +112,49 @@ namespace Arc {
std::list<Job*>& jobs;
};
JobStateProcessor stateProcessor(jobs);
// Callback applied to each per-job node returned by the REST "info" query:
// matches the returned info document to the corresponding Job object in the
// caller's list and updates that job's attributes in place.
class JobInfoProcessor: public InfoNodeProcessor {
public:
// Stores a reference to the caller's job list; entries are modified in place.
JobInfoProcessor(std::list<Job*>& jobs): jobs(jobs) {}
virtual void operator()(std::string const& id, XMLNode node, URL const& query_url) {
std::string job_id = node["id"];
XMLNode job_info = node["info_document"];
// Only process nodes that carry both a job id and an info document.
if(job_info && !job_id.empty()) {
for(std::list<Job*>::iterator itJob = jobs.begin(); itJob != jobs.end(); ++itJob) {
// NOTE(review): this local 'id' shadows the 'id' parameter — matching is
// done against the trailing path component of the stored JobID instead.
std::string id = (*itJob)->JobID;
std::string::size_type pos = id.rfind('/');
if(pos != std::string::npos) id.erase(0,pos+1);
if(job_id == id) {
// Fill job attributes from the GLUE2 ComputingActivity description.
(*itJob)->SetFromXML(job_info["ComputingActivity"]);
// Stage-in/out and session directories all resolve to the job's base URL.
std::string baseUrl = query_url.ConnectionURL()+query_url.Path()+"/"+job_id;
(*itJob)->StageInDir = baseUrl;
(*itJob)->StageOutDir = baseUrl;
(*itJob)->SessionDir = baseUrl;
// Use the first State element carrying the "arcrest:" prefix, if any.
for(XMLNode state = job_info["ComputingActivity"]["State"]; (bool)state; ++state) {
std::string stateStr = state;
if(strncmp(stateStr.c_str(), "arcrest:", 8) == 0) {
(*itJob)->State = JobStateARCREST(stateStr.substr(8));
break;
}
}
break;
}
}
}
}
private:
std::list<Job*>& jobs;
};
JobInfoProcessor infoProcessor(jobs);
Arc::URL currentServiceUrl;
std::list<std::string> IDs;
for (std::list<Job*>::const_iterator it = jobs.begin(); it != jobs.end(); ++it) {
if(!currentServiceUrl || (currentServiceUrl != GetAddressOfResource(**it))) {
if(!IDs.empty()) {
std::list<std::string> fakeIDs = IDs;
ProcessJobs(usercfg, currentServiceUrl, "status", 200, IDs, IDsProcessed, IDsNotProcessed, stateProcessor);
ProcessJobs(usercfg, currentServiceUrl, "info", 200, IDs, IDsProcessed, IDsNotProcessed, infoProcessor);
}
currentServiceUrl = GetAddressOfResource(**it);
}
......@@ -128,7 +163,7 @@ namespace Arc {
}
if(!IDs.empty()) {
std::list<std::string> fakeIDs = IDs;
ProcessJobs(usercfg, currentServiceUrl, "status", 200, IDs, IDsProcessed, IDsNotProcessed, stateProcessor);
ProcessJobs(usercfg, currentServiceUrl, "info", 200, IDs, IDsProcessed, IDsNotProcessed, infoProcessor);
}
}
......@@ -332,7 +367,7 @@ namespace Arc {
} else {
IDsProcessed.push_back(*it);
}
infoNodeProcessor(*it, job_item);
infoNodeProcessor(*it, job_item, statusUrl);
IDs.erase(it);
}
}
......
......@@ -31,6 +31,9 @@ namespace Arc {
// Base class for callbacks that process per-job info nodes returned by the
// ARC REST interface. The three-argument overload (with the query URL) is the
// new entry point; by default it forwards to the legacy two-argument overload
// and ignores query_url, so existing subclasses keep working unchanged.
class InfoNodeProcessor {
public:
virtual void operator()(std::string const& job_id, XMLNode info_node, URL const& query_url) {
return operator()(job_id, info_node);
};
// Legacy hook; default implementation is a no-op.
virtual void operator()(std::string const& job_id, XMLNode info_node) {};
};
......
......@@ -406,32 +406,41 @@ namespace ArcDMCRucio {
cJSON_Delete(root);
return DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response");
}
cJSON *rses = cJSON_GetObjectItem(root, "rses");
if (!rses) {
logger.msg(ERROR, "No RSE information returned in Rucio response: %s", content);
cJSON *pfns = cJSON_GetObjectItem(root, "pfns");
if (!pfns) {
logger.msg(ERROR, "No pfns returned in Rucio response: %s", content);
cJSON_Delete(root);
return DataStatus(DataStatus::ReadResolveError, EARCRESINVAL, "Failed to parse Rucio response");
}
cJSON *rse = rses->child;
while (rse) {
cJSON *replicas = rse->child;
while(replicas) {
if(replicas->type == cJSON_String || replicas->valuestring) {
URL loc(std::string(replicas->valuestring));
if(loc) {
// Add URL options to replicas
for (std::map<std::string, std::string>::const_iterator opt = url.CommonLocOptions().begin();
opt != url.CommonLocOptions().end(); opt++)
loc.AddOption(opt->first, opt->second, false);
for (std::map<std::string, std::string>::const_iterator opt = url.Options().begin();
opt != url.Options().end(); opt++)
loc.AddOption(opt->first, opt->second, false);
AddLocation(loc, loc.ConnectionURL());
cJSON *pfn = pfns->child;
while (pfn) {
if (pfn->type == cJSON_String || pfn->string) {
URL loc(std::string(pfn->string));
// Check if there is a filter on access latency
cJSON *replicatype = cJSON_GetObjectItem(pfn, "type");
if (!replicatype || replicatype->type != cJSON_String || !replicatype->valuestring) {
logger.msg(WARNING, "Cannot determine replica type for %s", loc.str());
} else {
logger.msg(DEBUG, "%s: replica type %s", loc.str(), std::string(replicatype->valuestring));
if (url.Option("accesslatency", "") != "" &&
Arc::lower(url.Option("accesslatency")) != Arc::lower(std::string(replicatype->valuestring))) {
logger.msg(INFO, "Skipping %s replica %s", std::string(replicatype->valuestring), loc.str());
pfn = pfn->next;
continue;
}
}
replicas = replicas->next;
if (loc) {
// Add URL options to replicas
for (std::map<std::string, std::string>::const_iterator opt = url.CommonLocOptions().begin();
opt != url.CommonLocOptions().end(); opt++)
loc.AddOption(opt->first, opt->second, false);
for (std::map<std::string, std::string>::const_iterator opt = url.Options().begin();
opt != url.Options().end(); opt++)
loc.AddOption(opt->first, opt->second, false);
AddLocation(loc, loc.ConnectionURL());
}
}
rse = rse->next;
pfn = pfn->next;
}
cJSON *fsize = cJSON_GetObjectItem(root, "bytes");
if (!fsize || fsize->type == cJSON_NULL) {
......
......@@ -391,7 +391,7 @@ namespace Arc {
std::string::size_type pos = FindProtocolSeparator(newurl);
if(pos != std::string::npos) {
// Absolute URL
operator=(URL(newurl));
operator=(URL(newurl, encoded));
return;
}
// Path
......
......@@ -50,6 +50,7 @@ namespace Arc {
valid_url_options.insert("rucioaccount");
valid_url_options.insert("failureallowed");
valid_url_options.insert("relativeuri");
valid_url_options.insert("accesslatency");
}
DataPoint::~DataPoint() {}
......
......@@ -54,7 +54,7 @@ ConfigTLSMCC::ConfigTLSMCC(XMLNode cfg,bool client) {
credential_ = (std::string)(cfg["Credential"]);
if(client) {
// Client is using safest setup by default
cipher_list_ = "TLSv1:SSLv3:!eNULL:!aNULL";
cipher_list_ = "ALL:!SSLv2:!eNULL:!aNULL";
hostname_ = (std::string)(cfg["Hostname"]);
XMLNode protocol_node = cfg["Protocol"];
while((bool)protocol_node) {
......@@ -70,13 +70,13 @@ ConfigTLSMCC::ConfigTLSMCC(XMLNode cfg,bool client) {
protocols_ = (std::string)(cfg["Protocols"]);
} else {
// Server allows client to choose. But requires authentication.
cipher_list_ = "TLSv1:SSLv3:eNULL:!aNULL";
cipher_list_ = "ALL:!SSLv2:eNULL:!aNULL";
}
if(cfg["Encryption"] == "required") {
} else if(cfg["Encryption"] == "preferred") {
cipher_list_ = "TLSv1:SSLv3:eNULL:!aNULL";
cipher_list_ = "ALL:!SSLv2:eNULL:!aNULL";
} else if(cfg["Encryption"] == "optional") {
cipher_list_ = "eNULL:TLSv1:SSLv3:!aNULL";
cipher_list_ = "eNULL:ALL:!SSLv2:!aNULL";
} else if(cfg["Encryption"] == "off") {
cipher_list_ = "eNULL:!aNULL";
}
......
......@@ -78,11 +78,11 @@ namespace DataStaging {
logger->addDestinations(get_log_destinations());
// check that endpoints can be handled
if (!source_endpoint) {
if (!source_endpoint || !(*source_endpoint)) {
logger->msg(Arc::ERROR, "Could not handle endpoint %s", source);
return;
}
if (!destination_endpoint) {
if (!destination_endpoint || !(*destination_endpoint)) {
logger->msg(Arc::ERROR, "Could not handle endpoint %s", destination);
return;
}
......
......@@ -103,7 +103,7 @@ sub collect_job_data() {
return if $alljobdata_initialized;
$alljobdata_initialized = 1;
$ENV{_condor_CONDOR_Q_ONLY_MY_JOBS}='false';
my ($out, $err, $ret) = condor_run('condor_q -constraint "NiceUser == False" -format "ClusterId = %V\n" ClusterId -format "ProcId = %V\n" ProcId -format "JobStatus = %V\n" JobStatus -format "CurrentHosts = %V\n" CurrentHosts -format "LastRemoteHost = %V\n" LastRemoteHost -format "RemoteHost = %V\n" RemoteHost -format "ImageSize = %V\n" ImageSize -format "RemoteWallClockTime = %V\n" RemoteWallClockTime -format "RemoteUserCpu = %V\n" RemoteUserCpu -format "RemoteSysCpu = %V\n" RemoteSysCpu -format "JobTimeLimit = %V\n" JobTimeLimit -format "JobCpuLimit = %V\n" JobCpuLimit -format "HoldReasonCode = %V\n\n" HoldReasonCode');
my ($out, $err, $ret) = condor_run('condor_q -format "ClusterId = %V\n" ClusterId -format "ProcId = %V\n" ProcId -format "JobStatus = %V\n" JobStatus -format "CurrentHosts = %V\n" CurrentHosts -format "LastRemoteHost = %V\n" LastRemoteHost -format "RemoteHost = %V\n" RemoteHost -format "ImageSize = %V\n" ImageSize -format "RemoteWallClockTime = %V\n" RemoteWallClockTime -format "RemoteUserCpu = %V\n" RemoteUserCpu -format "RemoteSysCpu = %V\n" RemoteSysCpu -format "JobTimeLimit = %V\n" JobTimeLimit -format "JobCpuLimit = %V\n" JobCpuLimit -format "HoldReasonCode = %V\n\n" HoldReasonCode');
return if $out =~ m/All queues are empty/;
error("Failed collecting job information.") if $ret;
for (split /\n\n+/, $out) {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment