Commit e4e51ce8 authored by Maiken's avatar Maiken

Merge branch 'bugz-3924' into 'master'

BUGZ-3924 support for read-only caches

See merge request nordugrid/arc!985
parents 78ed0821 14106623
......@@ -996,12 +996,13 @@
### The [arex/cache] block #########################################
## This subblock enables and configures the cache functionality of A-REX.
## A-REX can cache input files downloaded as part of stagein proces of grid jobs
## A-REX can cache input files downloaded as part of the stage-in process of grid jobs
## so that subsequent jobs requiring the same file don't have to download it again.
## The cached file will be symlinked (or copied) into the session directory of the job.
## To disable the cache functionality simply comment out the [arex/cache] config block.
## It is a good idea to have the cache on its own separate file system that is shared with the nodes.
## For more information about the cache functionality of A-REX consult the sysadmin guide.
## For more information about the cache functionality of A-REX consult the Data Cache
## technical description in the online documentation.
#[arex/cache]
## CHANGE: NEW block in 6.0.0.
......@@ -1013,12 +1014,18 @@
## If "link_path" is set to "." files are not soft-linked, but copied to session
## directory.
## If a cache directory needs to be drained, then "link_path" should specify "drain",
## in which case no new files will be added to the cache. Restart of arex also needed.
## in which case no new files will be added to the cache and files in the cache
## will no longer be used.
## Setting "link_path" to "readonly" ensures that no new files are written to
## this cache, but existing files can still be used.
## Draining and read-only caches are not cleaned by the A-REX cache cleaner.
## A restart of A-REX is required when changing cache options.
## multivalued
## default: undefined
#cachedir=/scratch/cache
#cachedir=/shared/cache /frontend/jobcache
#cachedir=/fs1/cache drain
## CHANGE: Added readonly option in 6.7
##
##
### end of the [arex/cache] #############################################
......
......@@ -46,11 +46,12 @@ namespace Arc {
// make a vector of one item and call _init
std::vector<std::string> caches;
std::vector<std::string> draining_caches;
std::vector<std::string> readonly_caches;
if (!cache_path.empty())
caches.push_back(cache_path);
// if problem in init, clear _caches so object is invalid
if (!_init(caches, draining_caches, id, job_uid, job_gid))
if (!_init(caches, draining_caches, readonly_caches, id, job_uid, job_gid))
_caches.clear();
}
......@@ -60,9 +61,10 @@ namespace Arc {
gid_t job_gid) {
std::vector<std::string> draining_caches;
std::vector<std::string> readonly_caches;
// if problem in init, clear _caches so object is invalid
if (!_init(caches, draining_caches, id, job_uid, job_gid))
if (!_init(caches, draining_caches, readonly_caches, id, job_uid, job_gid))
_caches.clear();
}
......@@ -72,13 +74,27 @@ namespace Arc {
uid_t job_uid,
gid_t job_gid) {
std::vector<std::string> readonly_caches;
// if problem in init, clear _caches so object is invalid
if (!_init(caches, draining_caches, id, job_uid, job_gid))
if (!_init(caches, draining_caches, readonly_caches, id, job_uid, job_gid))
_caches.clear();
}
/// Construct a FileCache from normal, draining and read-only cache lists.
/// All initialisation work is delegated to _init(); if that fails the
/// cache list is emptied so that the object reports itself as invalid.
FileCache::FileCache(const std::vector<std::string>& caches,
                     const std::vector<std::string>& draining_caches,
                     const std::vector<std::string>& readonly_caches,
                     const std::string& id,
                     uid_t job_uid,
                     gid_t job_gid) {
  const bool init_ok = _init(caches, draining_caches, readonly_caches, id, job_uid, job_gid);
  // An empty _caches vector is the "invalid object" marker used by callers
  if (!init_ok) _caches.clear();
}
bool FileCache::_init(const std::vector<std::string>& caches,
const std::vector<std::string>& draining_caches,
const std::vector<std::string>& readonly_caches,
const std::string& id,
uid_t job_uid,
gid_t job_gid) {
......@@ -130,6 +146,25 @@ namespace Arc {
cache_params.cache_link_path = "";
_draining_caches.push_back(cache_params);
}
// for each readonly cache
for (int i = 0; i < (int)readonly_caches.size(); i++) {
std::string cache = readonly_caches[i];
std::string cache_path = cache.substr(0, cache.find(" "));
if (cache_path.empty()) {
logger.msg(ERROR, "No read-only cache directory specified");
return false;
}
// tidy up paths - take off any trailing slashes
if (cache_path.rfind("/") == cache_path.length()-1) cache_path = cache_path.substr(0, cache_path.length()-1);
// add this cache to our list
struct CacheParameters cache_params;
cache_params.cache_path = cache_path;
cache_params.cache_link_path = "";
_readonly_caches.push_back(cache_params);
}
return true;
}
......@@ -497,13 +532,15 @@ namespace Arc {
bool FileCache::Release() const {
// go through all caches (including draining caches)
// go through all caches (including read-only and draining caches)
// and remove per-job dirs for our job id
std::vector<std::string> job_dirs;
for (int i = 0; i < (int)_caches.size(); i++)
job_dirs.push_back(_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id);
for (int i = 0; i < (int)_draining_caches.size(); i++)
job_dirs.push_back(_draining_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id);
for (int i = 0; i < (int)_readonly_caches.size(); i++)
job_dirs.push_back(_readonly_caches[i].cache_path + "/" + CACHE_JOB_DIR + "/" + _id);
for (int i = 0; i < (int)job_dirs.size(); i++) {
std::string job_dir = job_dirs[i];
......@@ -743,8 +780,8 @@ namespace Arc {
struct CacheParameters FileCache::_chooseCache(const std::string& url) const {
// When there is only one cache directory
if (_caches.size() == 1) return _caches.front();
// When there is only one cache directory
if (_caches.size() == 1 && _readonly_caches.empty()) return _caches.front();
std::string hash(_getHash(url));
struct stat fileStat;
......@@ -753,8 +790,17 @@ namespace Arc {
std::string c_file = i->cache_path + "/" + CACHE_DATA_DIR +"/" + hash;
if (FileStat(c_file, &fileStat, true)) {
return *i;
}
}
}
// check the read-only caches
for (std::vector<struct CacheParameters>::const_iterator i = _readonly_caches.begin(); i != _readonly_caches.end(); ++i) {
std::string c_file = i->cache_path + "/" + CACHE_DATA_DIR +"/" + hash;
if (FileStat(c_file, &fileStat, true)) {
return *i;
}
}
// check to see if a lock file already exists, since cache could be
// started but no file download was done
for (std::vector<struct CacheParameters>::const_iterator i = _caches.begin(); i != _caches.end(); ++i) {
......@@ -763,7 +809,7 @@ namespace Arc {
return *i;
}
}
// map of cache number and unused space in GB
std::map<int, float> cache_map;
// sum of all cache free space
......@@ -788,9 +834,9 @@ namespace Arc {
// shouldn't be possible to get here
return _caches.front();
}
float FileCache::_getCacheInfo(const std::string& path) const {
struct statvfs info;
if (statvfs(path.c_str(), &info) != 0) {
// if path does not exist info is undefined but the dir will be created in Start() anyway
......
......@@ -67,6 +67,8 @@ namespace Arc {
std::vector<struct CacheParameters> _caches;
/// Vector of caches to be drained.
std::vector<struct CacheParameters> _draining_caches;
/// Vector of read-only caches.
std::vector<struct CacheParameters> _readonly_caches;
/// A list of URLs that have already been unlocked in Link(). URLs in
/// this set will not be unlocked in Stop().
std::set<std::string> _urls_unlocked;
......@@ -99,6 +101,7 @@ namespace Arc {
/// Common code for constructors
bool _init(const std::vector<std::string>& caches,
const std::vector<std::string>& draining_caches,
const std::vector<std::string>& readonly_caches,
const std::string& id,
uid_t job_uid,
gid_t job_gid);
......@@ -170,9 +173,33 @@ namespace Arc {
* @param job_uid owner of job. The per-job dir will only be
* readable by this user
* @param job_gid owner group of job
*
* @deprecated Should be removed in ARC 7, use 3 vector constructor instead
*/
FileCache(const std::vector<std::string>& caches,
const std::vector<std::string>& draining_caches,
const std::string& id,
uid_t job_uid,
gid_t job_gid);
/// Create a new FileCache instance with multiple cache, read-only and draining cache directories.
/**
* @param caches a vector of strings describing caches. The format
* of each string is "cache_dir[ link_path]".
* @param draining_caches Same format as caches. These are the
* paths to caches which are to be drained.
* @param readonly_caches Same format as caches. Files in these caches
* can be used but no new files are written there.
* @param id the job id. This is used to create the per-job dir
* which the job's cache files will be hard linked from
* @param job_uid owner of job. The per-job dir will only be
* readable by this user
* @param job_gid owner group of job
* @since Added in 6.7
*/
FileCache(const std::vector<std::string>& caches,
const std::vector<std::string>& draining_caches,
const std::vector<std::string>& readonly_caches,
const std::string& id,
uid_t job_uid,
gid_t job_gid);
......
......@@ -46,10 +46,6 @@ public:
void tearDown();
void testStart();
void testRemoteCache();
void testRemoteCacheValidLock();
void testRemoteCacheInvalidLock();
void testRemoteCacheReplication();
void testStop();
void testStopAndDelete();
void testLinkFile();
......@@ -59,6 +55,7 @@ public:
void testRelease();
void testCheckDN();
void testTwoCaches();
void testReadOnlyCache();
void testCreationDate();
void testConstructor();
void testBadConstructor();
......@@ -765,6 +762,29 @@ void FileCacheTest::testTwoCaches() {
CPPUNIT_ASSERT(fc2->Release());
}
void FileCacheTest::testReadOnlyCache() {
// Set up a cache with one normal cache and one read-only cache
std::vector<std::string> caches;
caches.push_back(_cache_dir);
std::vector<std::string> readonly_caches;
caches.push_back(_testroot + "/readonly");
std::vector<std::string> draining_caches;
Arc::FileCache *fc2 = new Arc::FileCache(caches, draining_caches, readonly_caches, "1", _uid, _gid);
CPPUNIT_ASSERT(*fc2);
// Check that new files are always in the normal cache
for (unsigned int i = 0; i < 10; ++i) {
std::string newurl(_url + Arc::tostring(i));
CPPUNIT_ASSERT(fc2->File(newurl).find(_cache_dir) == 0);
}
// Add a file in the read-only cache and check it is used
std::string rofile(_testroot + "readonly/data/8a/929b8384300813ba1dd2d661c42835b80691a2");
CPPUNIT_ASSERT(_createFile(rofile));
CPPUNIT_ASSERT(fc2->File(_url) == rofile);
}
void FileCacheTest::testCreationDate() {
// call with non-existent file
......@@ -857,6 +877,18 @@ void FileCacheTest::testConstructor() {
std::string hash = "/8a/929b8384300813ba1dd2d661c42835b80691a2";
CPPUNIT_ASSERT_EQUAL(std::string(_cache_data_dir + hash), fc8->File(_url));
delete fc8;
// constructor with read-only caches
std::vector<std::string> readonly_caches;
readonly_caches.push_back(_testroot + "readonly");
Arc::FileCache *fc9 = new Arc::FileCache(caches, draining_caches, readonly_caches, _jobid, _uid, _gid);
CPPUNIT_ASSERT(*fc9);
// file should be in main cache
CPPUNIT_ASSERT_EQUAL(std::string(_cache_data_dir + hash), fc9->File(_url));
delete fc9;
}
void FileCacheTest::testBadConstructor() {
......
......@@ -409,11 +409,11 @@ namespace DataStaging {
}
/// Construct cache parameters from the supplied cache directory lists.
/// @param caches           list of "cache_dir[ link_path]" strings for normal caches
/// @param remote_caches    same format, for remote caches
/// @param drain_caches     same format, for caches being drained
/// @param readonly_caches  same format, for read-only caches (usable but not written to)
// NOTE(review): the scraped diff interleaved the pre-merge 3-argument lines
// with the merged 4-argument ones; this is the clean merged constructor.
DTRCacheParameters::DTRCacheParameters(std::vector<std::string> caches,
                                       std::vector<std::string> remote_caches,
                                       std::vector<std::string> drain_caches,
                                       std::vector<std::string> readonly_caches):
  cache_dirs(caches),
  remote_cache_dirs(remote_caches),
  drain_cache_dirs(drain_caches),
  readonly_cache_dirs(readonly_caches) {
}
DTRCredentialInfo::DTRCredentialInfo(const std::string& DN,
......
......@@ -123,16 +123,16 @@ namespace DataStaging {
public:
/// List of (cache dir [link dir])
std::vector<std::string> cache_dirs;
/// List of (cache dir [link dir]) for remote caches
std::vector<std::string> remote_cache_dirs;
/// List of draining caches. Not necessary for data staging but here for completeness.
/// List of draining caches
std::vector<std::string> drain_cache_dirs;
/// List of read-only caches
std::vector<std::string> readonly_cache_dirs;
/// Constructor with empty lists initialised
DTRCacheParameters(void) {};
/// Constructor with supplied cache lists
DTRCacheParameters(std::vector<std::string> caches,
std::vector<std::string> remote_caches,
std::vector<std::string> drain_caches);
std::vector<std::string> drain_caches,
std::vector<std::string> readonly_caches);
};
/// Class for storing credential information
......
......@@ -51,6 +51,7 @@ namespace DataStaging {
// Create cache using configuration
Arc::FileCache cache(request->get_cache_parameters().cache_dirs,
request->get_cache_parameters().drain_cache_dirs,
request->get_cache_parameters().readonly_cache_dirs,
request->get_parent_job_id(),
request->get_local_user().get_uid(),
request->get_local_user().get_gid());
......@@ -693,6 +694,7 @@ namespace DataStaging {
Arc::FileCache cache(request->get_cache_parameters().cache_dirs,
request->get_cache_parameters().drain_cache_dirs,
request->get_cache_parameters().readonly_cache_dirs,
request->get_parent_job_id(),
request->get_local_user().get_uid(),
request->get_local_user().get_gid());
......
......@@ -182,7 +182,7 @@ namespace DataStaging {
if (request->get_destination()->IsIndex()) {
request->get_logger()->msg(Arc::VERBOSE, "Will clean up pre-registered destination");
request->set_status(DTRStatus::REGISTER_REPLICA);
} else if (!request->get_cache_parameters().cache_dirs.empty() &&
} else if (!(request->get_cache_parameters().cache_dirs.empty() && request->get_cache_parameters().readonly_cache_dirs.empty()) &&
(request->get_cache_state() == CACHE_ALREADY_PRESENT || request->get_cache_state() == CACHEABLE)) {
request->get_logger()->msg(Arc::VERBOSE, "Will release cache locks");
request->set_status(DTRStatus::PROCESS_CACHE);
......@@ -250,7 +250,8 @@ namespace DataStaging {
request->get_transfer_share(), request->get_priority());
// Normal workflow is CHECK_CACHE
if (request->get_cache_state() == NON_CACHEABLE || request->get_cache_parameters().cache_dirs.empty()) {
if (request->get_cache_state() == NON_CACHEABLE ||
(request->get_cache_parameters().cache_dirs.empty() && request->get_cache_parameters().readonly_cache_dirs.empty())) {
request->get_logger()->msg(Arc::VERBOSE, "File is not cacheable, was requested not to be cached or no cache available, skipping cache check");
request->set_status(DTRStatus::CACHE_CHECKED);
} else {
......@@ -325,7 +326,8 @@ namespace DataStaging {
if(request->error()){
// It's impossible to download anything, since no replica location is resolved
// if cacheable, move to PROCESS_CACHE, the post-processor will do the cleanup
if (request->get_cache_state() == CACHEABLE && !request->get_cache_parameters().cache_dirs.empty()) {
if (request->get_cache_state() == CACHEABLE &&
!(request->get_cache_parameters().cache_dirs.empty() && request->get_cache_parameters().readonly_cache_dirs.empty())) {
request->get_logger()->msg(Arc::ERROR, "Problem with index service, will release cache lock");
request->set_status(DTRStatus::PROCESS_CACHE);
// else go to end state
......@@ -530,7 +532,8 @@ namespace DataStaging {
request->get_error_status().GetLastErrorState() == DTRStatus::REGISTERING_REPLICA) {
request->get_logger()->msg(Arc::ERROR, "Error registering replica, moving to end of data staging");
request->set_status(DTRStatus::CACHE_PROCESSED);
} else if (!request->get_cache_parameters().cache_dirs.empty() &&
} else if (!(request->get_cache_parameters().cache_dirs.empty() &&
request->get_cache_parameters().readonly_cache_dirs.empty()) &&
(request->get_cache_state() == CACHE_ALREADY_PRESENT ||
request->get_cache_state() == CACHE_DOWNLOADED ||
request->get_cache_state() == CACHEABLE ||
......
......@@ -24,11 +24,14 @@ namespace ARex {
Arc::MCC_Status ARexService::CacheCheck(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out) {
std::vector<std::string> caches;
std::vector<std::string> draining_caches;
std::vector<std::string> readonly_caches;
// use cache dir(s) from conf file
try {
CacheConfig cache_config(config.GmConfig().CacheParams());
cache_config.substitute(config.GmConfig(), config.User());
caches = cache_config.getCacheDirs();
readonly_caches = cache_config.getReadOnlyCacheDirs();
}
catch (CacheConfigException& e) {
logger.msg(Arc::ERROR, "Error with cache configuration: %s", e.what());
......@@ -45,7 +48,7 @@ Arc::MCC_Status ARexService::CacheCheck(ARexGMConfig& config,Arc::XMLNode in,Arc
return Arc::MCC_Status();
}
Arc::FileCache cache(caches, CACHE_CHECK_SESSION_DIR_ID ,config.User().get_uid(), config.User().get_gid());
Arc::FileCache cache(caches, draining_caches, readonly_caches, CACHE_CHECK_SESSION_DIR_ID ,config.User().get_uid(), config.User().get_gid());
if (!cache) {
logger.msg(Arc::ERROR, "Error with cache configuration");
Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Error with cache configuration");
......
......@@ -529,6 +529,7 @@ Arc::MCC_Status ARexService::cache_get(Arc::Message& outmsg, const std::string&
Arc::FileCache cache(config.GmConfig().CacheParams().getCacheDirs(),
config.GmConfig().CacheParams().getDrainingCacheDirs(),
config.GmConfig().CacheParams().getReadOnlyCacheDirs(),
"0", // Jobid is not used
config.User().get_uid(),
config.User().get_gid());
......
......@@ -155,27 +155,24 @@ void CacheConfig::parseINIConf(Arc::ConfigIni& cf) {
while (cache_dir.length() > 1 && cache_dir.rfind("/") == cache_dir.length()-1) cache_dir = cache_dir.substr(0, cache_dir.length()-1);
if (cache_dir[0] != '/') throw CacheConfigException("Cache path must start with '/'");
if (cache_dir.find("..") != std::string::npos) throw CacheConfigException("Cache path cannot contain '..'");
if (!cache_link_dir.empty() && cache_link_dir != "." && cache_link_dir != "drain") {
if (!cache_link_dir.empty() && cache_link_dir != "." && cache_link_dir != "drain" && cache_link_dir != "readonly") {
while (cache_link_dir.rfind("/") == cache_link_dir.length()-1) cache_link_dir = cache_link_dir.substr(0, cache_link_dir.length()-1);
if (cache_link_dir[0] != '/') throw CacheConfigException("Cache link path must start with '/'");
if (cache_link_dir.find("..") != std::string::npos) throw CacheConfigException("Cache link path cannot contain '..'");
}
// add this cache to our list
std::string cache = cache_dir;
bool isDrainingCache = false;
// check if the cache dir needs to be drained
// check if the cache dir needs to be drained or is read-only
if (cache_link_dir == "drain") {
cache = cache_dir.substr(0, cache_dir.find(' '));
cache_link_dir = "";
isDrainingCache = true;
_draining_cache_dirs.push_back(cache_dir);
}
else if (cache_link_dir == "readonly") {
_readonly_cache_dirs.push_back(cache_dir);
}
else {
if (!cache_link_dir.empty()) {
cache_dir += " "+cache_link_dir;
}
_cache_dirs.push_back(cache_dir);
}
if (!cache_link_dir.empty())
cache += " "+cache_link_dir;
if (isDrainingCache)
_draining_cache_dirs.push_back(cache);
else
_cache_dirs.push_back(cache);
}
}
} else if (cf.SectionNum() == 2) { // arex/ws/cache
......@@ -209,6 +206,9 @@ void CacheConfig::substitute(const GMConfig& config, const Arc::User& user) {
for (std::vector<std::string>::iterator i = _draining_cache_dirs.begin(); i != _draining_cache_dirs.end(); ++i) {
config.Substitute(*i, user);
}
for (std::vector<std::string>::iterator i = _readonly_cache_dirs.begin(); i != _readonly_cache_dirs.end(); ++i) {
config.Substitute(*i, user);
}
}
} // namespace ARex
......@@ -49,6 +49,10 @@ class CacheConfig {
* Cache directories that are needed to be drained
**/
std::vector<std::string> _draining_cache_dirs;
/**
* Cache directories that are read-only
**/
std::vector<std::string> _readonly_cache_dirs;
/**
* Logfile for cache cleaning messages
*/
......@@ -93,6 +97,7 @@ class CacheConfig {
CacheConfig(): _cache_max(0), _cache_min(0), _cleaning_enabled(false), _cache_shared(false), _clean_timeout(0) {};
std::vector<std::string> getCacheDirs() const { return _cache_dirs; };
std::vector<std::string> getDrainingCacheDirs() const { return _draining_cache_dirs; };
std::vector<std::string> getReadOnlyCacheDirs() const { return _readonly_cache_dirs; };
/// Substitute all cache paths, with information given in user if necessary
void substitute(const GMConfig& config, const Arc::User& user);
int getCacheMax() const { return _cache_max; };
......
......@@ -123,7 +123,8 @@ void GMConfig::Print() const {
logger.msg(Arc::INFO, "\tdefault ttl : %u", keep_finished);
std::vector<std::string> conf_caches = cache_params.getCacheDirs();
if(conf_caches.empty()) {
std::vector<std::string> readonly_caches = cache_params.getReadOnlyCacheDirs();
if(conf_caches.empty() && readonly_caches.empty()) {
logger.msg(Arc::INFO,"No valid caches found in configuration, caching is disabled");
return;
}
......@@ -133,6 +134,9 @@ void GMConfig::Print() const {
if ((*i).find(" ") != std::string::npos)
logger.msg(Arc::INFO, "\tCache link dir : %s", (*i).substr((*i).find_last_of(" ")+1, (*i).length()-(*i).find_last_of(" ")+1));
}
for (std::vector<std::string>::iterator i = readonly_caches.begin(); i != readonly_caches.end(); i++) {
logger.msg(Arc::INFO, "\tCache (read-only): %s", *i);
}
if (cache_params.cleanCache()) logger.msg(Arc::INFO, "\tCache cleaning enabled");
else logger.msg(Arc::INFO, "\tCache cleaning disabled");
}
......
......@@ -1053,6 +1053,7 @@ bool DTRGenerator::processReceivedJob(GMJobRef& job) {
// Substitute cache paths
cache_params.substitute(config, job->get_user());
cache_parameters.cache_dirs = cache_params.getCacheDirs();
cache_parameters.readonly_cache_dirs = cache_params.getReadOnlyCacheDirs();
dtr->set_cache_parameters(cache_parameters);
dtr->registerCallback(this,DataStaging::GENERATOR);
dtr->registerCallback(scheduler, DataStaging::SCHEDULER);
......@@ -1364,6 +1365,7 @@ void DTRGenerator::CleanCacheJobLinks(const GMConfig& config, const GMJobRef& jo
// there is no uid switch during Release so uid/gid is not so important
Arc::FileCache cache(cache_config.getCacheDirs(),
cache_config.getDrainingCacheDirs(),
cache_config.getReadOnlyCacheDirs(),
job->get_id(), job->get_user().get_uid(), job->get_user().get_gid());
cache.Release();
Arc::Time processing_end;
......
......@@ -1220,7 +1220,12 @@ JobsList::ActJobResult JobsList::ActJobFinished(GMJobRef i) {
// add draining caches
std::vector<std::string> draining_caches = cache_config.getDrainingCacheDirs();
for (std::vector<std::string>::iterator it = draining_caches.begin(); it != draining_caches.end(); it++) {
cache_per_job_dirs.push_back(it->substr(0, it->find(" "))+"/joblinks");
cache_per_job_dirs.push_back(*it+"/joblinks");
}
// and read-only caches
std::vector<std::string> readonly_caches = cache_config.getReadOnlyCacheDirs();
for (std::vector<std::string>::iterator it = readonly_caches.begin(); it != readonly_caches.end(); it++) {
cache_per_job_dirs.push_back(*it+"/joblinks");
}
job_clean_deleted(*i,config,cache_per_job_dirs);
SetJobState(i, JOB_STATE_DELETED, "Job stayed unattended too long");
......
......@@ -61,7 +61,7 @@ CandyPond::CandyPond(Arc::Config *cfg, Arc::PluginArgument* parg) :
return;
}
config.Print();
if (config.CacheParams().getCacheDirs().empty()) {
if (config.CacheParams().getCacheDirs().empty() && config.CacheParams().getReadOnlyCacheDirs().empty()) {
logger.msg(Arc::ERROR, "No caches defined in configuration");
return;
}
......@@ -109,7 +109,10 @@ Arc::MCC_Status CandyPond::CacheCheck(Arc::XMLNode in, Arc::XMLNode out, const A
// substitute cache paths according to mapped user
ARex::CacheConfig cache_params(config.CacheParams());
cache_params.substitute(config, mapped_user);
Arc::FileCache cache(cache_params.getCacheDirs(), "0", mapped_user.get_uid(), mapped_user.get_gid());
Arc::FileCache cache(cache_params.getCacheDirs(),
cache_params.getDrainingCacheDirs(),
cache_params.getReadOnlyCacheDirs(),
"0", mapped_user.get_uid(), mapped_user.get_gid());
if (!cache) {
logger.msg(Arc::ERROR, "Error creating cache");
return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache");
......@@ -307,7 +310,10 @@ Arc::MCC_Status CandyPond::CacheLink(Arc::XMLNode in, Arc::XMLNode out, const Ar
// substitute cache paths according to mapped user
ARex::CacheConfig cache_params(config.CacheParams());
cache_params.substitute(config, mapped_user);
Arc::FileCache cache(cache_params.getCacheDirs(), jobid, mapped_user.get_uid(), mapped_user.get_gid());
Arc::FileCache cache(cache_params.getCacheDirs(),
cache_params.getDrainingCacheDirs(),
cache_params.getReadOnlyCacheDirs(),
jobid, mapped_user.get_uid(), mapped_user.get_gid());
if (!cache) {
logger.msg(Arc::ERROR, "Error with cache configuration");
return Arc::MCC_Status(Arc::GENERIC_ERROR, "CacheCheck", "Server error with cache");
......
......@@ -183,7 +183,7 @@ namespace CandyPond {
cache_params.substitute(config, user);
DataStaging::DTRCacheParameters cache_parameters;
cache_parameters.cache_dirs = cache_params.getCacheDirs();
// we are definitely going to download so remote caches are not useful here
// we are definitely going to download so read-only caches are not useful here
dtr->set_cache_parameters(cache_parameters);
dtr->registerCallback(this, DataStaging::GENERATOR);
dtr->registerCallback(scheduler, DataStaging::SCHEDULER);
......
......@@ -25,6 +25,8 @@ class CacheControl(ComponentControl):
if not self.cache_dirs:
self.logger.error('Failed to get cache directories from arc.conf.')
sys.exit(1)
# Strip off any options
self.cache_dirs = [i.split()[0] for i in self.cache_dirs]
self.logger.debug('Following cache locations found: %s', ','.join(self.cache_dirs))
def stats(self):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment