Implement locking for update/bulkchange process

Avoid the possibility of these two processes clashing with each other,
especially when multiple branches are being used.

Signed-off-by: Paul Eggleton <paul.eggleton@linux.intel.com>
Paul Eggleton 2013-07-27 21:55:50 +01:00
parent 1643aef67d
commit 1eebd6e525
3 changed files with 350 additions and 315 deletions
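
The change takes an exclusive lock on a shared lock file in the fetch
directory around the body of both scripts, so the update and bulkchange
processes cannot operate on the git checkouts at the same time. In outline,
both scripts now follow this pattern (a minimal sketch distilled from the
diff below, not a verbatim excerpt; the error reporting differs slightly
between the two scripts):

    lockfn = os.path.join(fetchdir, "layerindex.lock")
    lockfile = utils.lock_file(lockfn)
    if not lockfile:
        sys.stderr.write("Layer index lock timeout expired\n")
        sys.exit(1)
    try:
        # ... fetch repositories, parse recipes, write records ...
        pass
    finally:
        utils.unlock_file(lockfile)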

layerindex/bulkchange.py

@@ -215,14 +215,23 @@ def main():
     fetchdir = settings.LAYER_FETCH_DIR
     bitbakepath = os.path.join(fetchdir, 'bitbake')
-    (tinfoil, tempdir) = recipeparse.init_parser(settings, branch, bitbakepath, True)
+    lockfn = os.path.join(fetchdir, "layerindex.lock")
+    lockfile = utils.lock_file(lockfn)
+    if not lockfile:
+        sys.stderr.write("Layer index lock timeout expired\n")
+        sys.exit(1)
+
+    try:
+        (tinfoil, tempdir) = recipeparse.init_parser(settings, branch, bitbakepath, True)
 
-    changeset = get_changeset(sys.argv[1])
-    if not changeset:
-        sys.stderr.write("Unable to find changeset with id %s\n" % sys.argv[1])
-        sys.exit(1)
+        changeset = get_changeset(sys.argv[1])
+        if not changeset:
+            sys.stderr.write("Unable to find changeset with id %s\n" % sys.argv[1])
+            sys.exit(1)
 
-    outp = generate_patches(tinfoil, fetchdir, changeset, sys.argv[2])
+        outp = generate_patches(tinfoil, fetchdir, changeset, sys.argv[2])
+    finally:
+        utils.unlock_file(lockfile)
 
     if outp:
         print outp
     else:

layerindex/update.py

@@ -174,344 +174,353 @@ def main():
     fetchedrepos = []
     failedrepos = []
 
-    bitbakepath = os.path.join(fetchdir, 'bitbake')
+    lockfn = os.path.join(fetchdir, "layerindex.lock")
+    lockfile = utils.lock_file(lockfn)
+    if not lockfile:
+        logger.error("Layer index lock timeout expired")
+        sys.exit(1)
+
+    try:
+        bitbakepath = os.path.join(fetchdir, 'bitbake')
 
-    if not options.nofetch:
-        # Fetch latest metadata from repositories
-        for layer in layerquery:
-            # Handle multiple layers in a single repo
-            urldir = layer.get_fetch_dir()
-            repodir = os.path.join(fetchdir, urldir)
-            if not (layer.vcs_url in fetchedrepos or layer.vcs_url in failedrepos):
-                logger.info("Fetching remote repository %s" % layer.vcs_url)
-                out = None
-                try:
-                    if not os.path.exists(repodir):
-                        out = utils.runcmd("git clone %s %s" % (layer.vcs_url, urldir), fetchdir)
-                    else:
-                        out = utils.runcmd("git fetch", repodir)
-                except Exception as e:
-                    logger.error("Fetch of layer %s failed: %s" % (layer.name, str(e)))
-                    failedrepos.append(layer.vcs_url)
-                    continue
-                fetchedrepos.append(layer.vcs_url)
+        if not options.nofetch:
+            # Fetch latest metadata from repositories
+            for layer in layerquery:
+                # Handle multiple layers in a single repo
+                urldir = layer.get_fetch_dir()
+                repodir = os.path.join(fetchdir, urldir)
+                if not (layer.vcs_url in fetchedrepos or layer.vcs_url in failedrepos):
+                    logger.info("Fetching remote repository %s" % layer.vcs_url)
+                    out = None
+                    try:
+                        if not os.path.exists(repodir):
+                            out = utils.runcmd("git clone %s %s" % (layer.vcs_url, urldir), fetchdir)
+                        else:
+                            out = utils.runcmd("git fetch", repodir)
+                    except Exception as e:
+                        logger.error("Fetch of layer %s failed: %s" % (layer.name, str(e)))
+                        failedrepos.append(layer.vcs_url)
+                        continue
+                    fetchedrepos.append(layer.vcs_url)
 
-    if not fetchedrepos:
-        logger.error("No repositories could be fetched, exiting")
-        sys.exit(1)
+        if not fetchedrepos:
+            logger.error("No repositories could be fetched, exiting")
+            sys.exit(1)
 
-    logger.info("Fetching bitbake from remote repository %s" % settings.BITBAKE_REPO_URL)
-    if not os.path.exists(bitbakepath):
-        out = utils.runcmd("git clone %s %s" % (settings.BITBAKE_REPO_URL, 'bitbake'), fetchdir)
-    else:
-        out = utils.runcmd("git fetch", bitbakepath)
+        logger.info("Fetching bitbake from remote repository %s" % settings.BITBAKE_REPO_URL)
+        if not os.path.exists(bitbakepath):
+            out = utils.runcmd("git clone %s %s" % (settings.BITBAKE_REPO_URL, 'bitbake'), fetchdir)
+        else:
+            out = utils.runcmd("git fetch", bitbakepath)
 
-    try:
-        (tinfoil, tempdir) = recipeparse.init_parser(settings, branch, bitbakepath, nocheckout=options.nocheckout)
-    except recipeparse.RecipeParseError as e:
-        logger.error(str(e))
-        sys.exit(1)
+        try:
+            (tinfoil, tempdir) = recipeparse.init_parser(settings, branch, bitbakepath, nocheckout=options.nocheckout)
+        except recipeparse.RecipeParseError as e:
+            logger.error(str(e))
+            sys.exit(1)
 
-    # Clear the default value of SUMMARY so that we can use DESCRIPTION instead if it hasn't been set
-    tinfoil.config_data.setVar('SUMMARY', '')
-    # Clear the default value of DESCRIPTION so that we can see where it's not set
-    tinfoil.config_data.setVar('DESCRIPTION', '')
-    # Clear the default value of HOMEPAGE ('unknown')
-    tinfoil.config_data.setVar('HOMEPAGE', '')
-    # Set a blank value for LICENSE so that it doesn't cause the parser to die (e.g. with meta-ti -
-    # why won't they just fix that?!)
-    tinfoil.config_data.setVar('LICENSE', '')
+        # Clear the default value of SUMMARY so that we can use DESCRIPTION instead if it hasn't been set
+        tinfoil.config_data.setVar('SUMMARY', '')
+        # Clear the default value of DESCRIPTION so that we can see where it's not set
+        tinfoil.config_data.setVar('DESCRIPTION', '')
+        # Clear the default value of HOMEPAGE ('unknown')
+        tinfoil.config_data.setVar('HOMEPAGE', '')
+        # Set a blank value for LICENSE so that it doesn't cause the parser to die (e.g. with meta-ti -
+        # why won't they just fix that?!)
+        tinfoil.config_data.setVar('LICENSE', '')
 
-    # Process and extract data from each layer
-    for layer in layerquery:
-        transaction.enter_transaction_management()
-        transaction.managed(True)
-        try:
-            urldir = layer.get_fetch_dir()
-            repodir = os.path.join(fetchdir, urldir)
-            if layer.vcs_url in failedrepos:
-                logger.info("Skipping update of layer %s as fetch of repository %s failed" % (layer.name, layer.vcs_url))
-                transaction.rollback()
-                continue
+        # Process and extract data from each layer
+        for layer in layerquery:
+            transaction.enter_transaction_management()
+            transaction.managed(True)
+            try:
+                urldir = layer.get_fetch_dir()
+                repodir = os.path.join(fetchdir, urldir)
+                if layer.vcs_url in failedrepos:
+                    logger.info("Skipping update of layer %s as fetch of repository %s failed" % (layer.name, layer.vcs_url))
+                    transaction.rollback()
+                    continue
 
-            layerbranch = layer.get_layerbranch(options.branch)
+                layerbranch = layer.get_layerbranch(options.branch)
 
-            branchname = options.branch
-            branchdesc = options.branch
-            if layerbranch:
-                if layerbranch.actual_branch:
-                    branchname = layerbranch.actual_branch
-                    branchdesc = "%s (%s)" % (options.branch, branchname)
+                branchname = options.branch
+                branchdesc = options.branch
+                if layerbranch:
+                    if layerbranch.actual_branch:
+                        branchname = layerbranch.actual_branch
+                        branchdesc = "%s (%s)" % (options.branch, branchname)
 
-            # Collect repo info
-            repo = git.Repo(repodir)
-            assert repo.bare == False
-            try:
-                topcommit = repo.commit('origin/%s' % branchname)
-            except:
-                if layerbranch:
-                    logger.error("Failed update of layer %s - branch %s no longer exists" % (layer.name, branchdesc))
-                else:
-                    logger.info("Skipping update of layer %s - branch %s doesn't exist" % (layer.name, branchdesc))
-                transaction.rollback()
-                continue
+                # Collect repo info
+                repo = git.Repo(repodir)
+                assert repo.bare == False
+                try:
+                    topcommit = repo.commit('origin/%s' % branchname)
+                except:
+                    if layerbranch:
+                        logger.error("Failed update of layer %s - branch %s no longer exists" % (layer.name, branchdesc))
+                    else:
+                        logger.info("Skipping update of layer %s - branch %s doesn't exist" % (layer.name, branchdesc))
+                    transaction.rollback()
+                    continue
 
-            if not layerbranch:
-                # LayerBranch doesn't exist for this branch, create it
-                layerbranch = LayerBranch()
-                layerbranch.layer = layer
-                layerbranch.branch = branch
-                layerbranch_master = layer.get_layerbranch('master')
-                if layerbranch_master:
-                    layerbranch.vcs_subdir = layerbranch_master.vcs_subdir
-                layerbranch.save()
-                if layerbranch_master:
-                    for maintainer in layerbranch_master.layermaintainer_set.all():
-                        maintainer.pk = None
-                        maintainer.id = None
-                        maintainer.layerbranch = layerbranch
-                        maintainer.save()
-                    for dep in layerbranch_master.dependencies_set.all():
-                        dep.pk = None
-                        dep.id = None
-                        dep.layerbranch = layerbranch
-                        dep.save()
+                if not layerbranch:
+                    # LayerBranch doesn't exist for this branch, create it
+                    layerbranch = LayerBranch()
+                    layerbranch.layer = layer
+                    layerbranch.branch = branch
+                    layerbranch_master = layer.get_layerbranch('master')
+                    if layerbranch_master:
+                        layerbranch.vcs_subdir = layerbranch_master.vcs_subdir
+                    layerbranch.save()
+                    if layerbranch_master:
+                        for maintainer in layerbranch_master.layermaintainer_set.all():
+                            maintainer.pk = None
+                            maintainer.id = None
+                            maintainer.layerbranch = layerbranch
+                            maintainer.save()
+                        for dep in layerbranch_master.dependencies_set.all():
+                            dep.pk = None
+                            dep.id = None
+                            dep.layerbranch = layerbranch
+                            dep.save()
 
-            if layerbranch.vcs_subdir:
-                # Find latest commit in subdirectory
-                # A bit odd to do it this way but apparently there's no other way in the GitPython API
-                for commit in repo.iter_commits('origin/%s' % options.branch, paths=layerbranch.vcs_subdir):
-                    topcommit = commit
-                    break
+                if layerbranch.vcs_subdir:
+                    # Find latest commit in subdirectory
+                    # A bit odd to do it this way but apparently there's no other way in the GitPython API
+                    for commit in repo.iter_commits('origin/%s' % options.branch, paths=layerbranch.vcs_subdir):
+                        topcommit = commit
+                        break
 
-            layerdir = os.path.join(repodir, layerbranch.vcs_subdir)
-            layerdir_start = os.path.normpath(layerdir) + os.sep
+                layerdir = os.path.join(repodir, layerbranch.vcs_subdir)
+                layerdir_start = os.path.normpath(layerdir) + os.sep
 
-            layerrecipes = Recipe.objects.filter(layerbranch=layerbranch)
-            layermachines = Machine.objects.filter(layerbranch=layerbranch)
-            layerappends = BBAppend.objects.filter(layerbranch=layerbranch)
-            layerclasses = BBClass.objects.filter(layerbranch=layerbranch)
-            if layerbranch.vcs_last_rev != topcommit.hexsha or options.reload:
-                # Check out appropriate branch
-                if not options.nocheckout:
-                    out = utils.runcmd("git checkout origin/%s" % branchname, repodir)
-                    out = utils.runcmd("git clean -f -x", repodir)
+                layerrecipes = Recipe.objects.filter(layerbranch=layerbranch)
+                layermachines = Machine.objects.filter(layerbranch=layerbranch)
+                layerappends = BBAppend.objects.filter(layerbranch=layerbranch)
+                layerclasses = BBClass.objects.filter(layerbranch=layerbranch)
+                if layerbranch.vcs_last_rev != topcommit.hexsha or options.reload:
+                    # Check out appropriate branch
+                    if not options.nocheckout:
+                        out = utils.runcmd("git checkout origin/%s" % branchname, repodir)
+                        out = utils.runcmd("git clean -f -x", repodir)
 
-                if not os.path.exists(layerdir):
-                    if options.branch == 'master':
-                        logger.error("Subdirectory for layer %s does not exist on branch %s!" % branchdesc)
-                        transaction.rollback()
-                        continue
-                    else:
-                        logger.info("Skipping update of layer %s for branch %s - subdirectory does not exist on this branch" % (layer.name, branchdesc))
-                        transaction.rollback()
-                        continue
+                    if not os.path.exists(layerdir):
+                        if options.branch == 'master':
+                            logger.error("Subdirectory for layer %s does not exist on branch %s!" % branchdesc)
+                            transaction.rollback()
+                            continue
+                        else:
+                            logger.info("Skipping update of layer %s for branch %s - subdirectory does not exist on this branch" % (layer.name, branchdesc))
+                            transaction.rollback()
+                            continue
 
-                if not os.path.exists(os.path.join(layerdir, 'conf/layer.conf')):
-                    logger.error("conf/layer.conf not found for layer %s - is subdirectory set correctly?" % layer.name)
-                    transaction.rollback()
-                    continue
+                    if not os.path.exists(os.path.join(layerdir, 'conf/layer.conf')):
+                        logger.error("conf/layer.conf not found for layer %s - is subdirectory set correctly?" % layer.name)
+                        transaction.rollback()
+                        continue
 
-                logger.info("Collecting data for layer %s on branch %s" % (layer.name, branchdesc))
+                    logger.info("Collecting data for layer %s on branch %s" % (layer.name, branchdesc))
 
-                try:
-                    config_data_copy = recipeparse.setup_layer(tinfoil.config_data, fetchdir, layerdir, layer, layerbranch)
-                except recipeparse.RecipeParseError as e:
-                    logger.error(str(e))
-                    transaction.rollback()
-                    continue
+                    try:
+                        config_data_copy = recipeparse.setup_layer(tinfoil.config_data, fetchdir, layerdir, layer, layerbranch)
+                    except recipeparse.RecipeParseError as e:
+                        logger.error(str(e))
+                        transaction.rollback()
+                        continue
 
-                if layerbranch.vcs_last_rev and not options.reload:
-                    try:
-                        diff = repo.commit(layerbranch.vcs_last_rev).diff(topcommit)
-                    except Exception as e:
-                        logger.warn("Unable to get diff from last commit hash for layer %s - falling back to slow update: %s" % (layer.name, str(e)))
-                        diff = None
-                else:
-                    diff = None
+                    if layerbranch.vcs_last_rev and not options.reload:
+                        try:
+                            diff = repo.commit(layerbranch.vcs_last_rev).diff(topcommit)
+                        except Exception as e:
+                            logger.warn("Unable to get diff from last commit hash for layer %s - falling back to slow update: %s" % (layer.name, str(e)))
+                            diff = None
+                    else:
+                        diff = None
 
-                # We handle recipes specially to try to preserve the same id
-                # when recipe upgrades happen (so that if a user bookmarks a
-                # recipe page it remains valid)
-                layerrecipes_delete = []
-                layerrecipes_add = []
+                    # We handle recipes specially to try to preserve the same id
+                    # when recipe upgrades happen (so that if a user bookmarks a
+                    # recipe page it remains valid)
+                    layerrecipes_delete = []
+                    layerrecipes_add = []
 
-                if diff:
-                    # Apply git changes to existing recipe list
+                    if diff:
+                        # Apply git changes to existing recipe list
 
-                    if layerbranch.vcs_subdir:
-                        subdir_start = os.path.normpath(layerbranch.vcs_subdir) + os.sep
-                    else:
-                        subdir_start = ""
+                        if layerbranch.vcs_subdir:
+                            subdir_start = os.path.normpath(layerbranch.vcs_subdir) + os.sep
+                        else:
+                            subdir_start = ""
 
-                    updatedrecipes = set()
-                    for d in diff.iter_change_type('D'):
-                        path = d.a_blob.path
-                        if path.startswith(subdir_start):
-                            (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
-                            if typename == 'recipe':
-                                values = layerrecipes.filter(filepath=filepath).filter(filename=filename).values('id', 'filepath', 'filename', 'pn')
-                                layerrecipes_delete.append(values[0])
-                                logger.debug("Mark %s for deletion" % values[0])
-                                updatedrecipes.add(os.path.join(values[0]['filepath'], values[0]['filename']))
-                            elif typename == 'bbappend':
-                                layerappends.filter(filepath=filepath).filter(filename=filename).delete()
-                            elif typename == 'machine':
-                                layermachines.filter(name=filename).delete()
-                            elif typename == 'bbclass':
-                                layerclasses.filter(name=filename).delete()
+                        updatedrecipes = set()
+                        for d in diff.iter_change_type('D'):
+                            path = d.a_blob.path
+                            if path.startswith(subdir_start):
+                                (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
+                                if typename == 'recipe':
+                                    values = layerrecipes.filter(filepath=filepath).filter(filename=filename).values('id', 'filepath', 'filename', 'pn')
+                                    layerrecipes_delete.append(values[0])
+                                    logger.debug("Mark %s for deletion" % values[0])
+                                    updatedrecipes.add(os.path.join(values[0]['filepath'], values[0]['filename']))
+                                elif typename == 'bbappend':
+                                    layerappends.filter(filepath=filepath).filter(filename=filename).delete()
+                                elif typename == 'machine':
+                                    layermachines.filter(name=filename).delete()
+                                elif typename == 'bbclass':
+                                    layerclasses.filter(name=filename).delete()
 
-                    for d in diff.iter_change_type('A'):
-                        path = d.b_blob.path
-                        if path.startswith(subdir_start):
-                            (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
-                            if typename == 'recipe':
-                                layerrecipes_add.append(os.path.join(repodir, path))
-                                logger.debug("Mark %s for addition" % path)
-                                updatedrecipes.add(os.path.join(filepath, filename))
-                            elif typename == 'bbappend':
-                                append = BBAppend()
-                                append.layerbranch = layerbranch
-                                append.filename = filename
-                                append.filepath = filepath
-                                append.save()
-                            elif typename == 'machine':
-                                machine = Machine()
-                                machine.layerbranch = layerbranch
-                                machine.name = filename
-                                update_machine_conf_file(os.path.join(repodir, path), machine)
-                                machine.save()
-                            elif typename == 'bbclass':
-                                bbclass = BBClass()
-                                bbclass.layerbranch = layerbranch
-                                bbclass.name = filename
-                                bbclass.save()
+                        for d in diff.iter_change_type('A'):
+                            path = d.b_blob.path
+                            if path.startswith(subdir_start):
+                                (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
+                                if typename == 'recipe':
+                                    layerrecipes_add.append(os.path.join(repodir, path))
+                                    logger.debug("Mark %s for addition" % path)
+                                    updatedrecipes.add(os.path.join(filepath, filename))
+                                elif typename == 'bbappend':
+                                    append = BBAppend()
+                                    append.layerbranch = layerbranch
+                                    append.filename = filename
+                                    append.filepath = filepath
+                                    append.save()
+                                elif typename == 'machine':
+                                    machine = Machine()
+                                    machine.layerbranch = layerbranch
+                                    machine.name = filename
+                                    update_machine_conf_file(os.path.join(repodir, path), machine)
+                                    machine.save()
+                                elif typename == 'bbclass':
+                                    bbclass = BBClass()
+                                    bbclass.layerbranch = layerbranch
+                                    bbclass.name = filename
+                                    bbclass.save()
 
-                    dirtyrecipes = set()
-                    for d in diff.iter_change_type('M'):
-                        path = d.a_blob.path
-                        if path.startswith(subdir_start):
-                            (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
-                            if typename == 'recipe':
-                                logger.debug("Mark %s for update" % path)
-                                results = layerrecipes.filter(filepath=filepath).filter(filename=filename)[:1]
-                                if results:
-                                    recipe = results[0]
-                                    update_recipe_file(config_data_copy, os.path.join(layerdir, filepath), recipe, layerdir_start, repodir)
-                                    recipe.save()
-                                    updatedrecipes.add(recipe.full_path())
-                            elif typename == 'machine':
-                                results = layermachines.filter(name=filename)
-                                if results:
-                                    machine = results[0]
-                                    update_machine_conf_file(os.path.join(repodir, path), machine)
-                                    machine.save()
+                        dirtyrecipes = set()
+                        for d in diff.iter_change_type('M'):
+                            path = d.a_blob.path
+                            if path.startswith(subdir_start):
+                                (typename, filepath, filename) = recipeparse.detect_file_type(path, subdir_start)
+                                if typename == 'recipe':
+                                    logger.debug("Mark %s for update" % path)
+                                    results = layerrecipes.filter(filepath=filepath).filter(filename=filename)[:1]
+                                    if results:
+                                        recipe = results[0]
+                                        update_recipe_file(config_data_copy, os.path.join(layerdir, filepath), recipe, layerdir_start, repodir)
+                                        recipe.save()
+                                        updatedrecipes.add(recipe.full_path())
+                                elif typename == 'machine':
+                                    results = layermachines.filter(name=filename)
+                                    if results:
+                                        machine = results[0]
+                                        update_machine_conf_file(os.path.join(repodir, path), machine)
+                                        machine.save()
 
-                            deps = RecipeFileDependency.objects.filter(layerbranch=layerbranch).filter(path=path)
-                            for dep in deps:
-                                dirtyrecipes.add(dep.recipe)
+                                deps = RecipeFileDependency.objects.filter(layerbranch=layerbranch).filter(path=path)
+                                for dep in deps:
+                                    dirtyrecipes.add(dep.recipe)
 
-                    for recipe in dirtyrecipes:
-                        if not recipe.full_path() in updatedrecipes:
-                            update_recipe_file(config_data_copy, os.path.join(layerdir, recipe.filepath), recipe, layerdir_start, repodir)
-                else:
-                    # Collect recipe data from scratch
+                        for recipe in dirtyrecipes:
+                            if not recipe.full_path() in updatedrecipes:
+                                update_recipe_file(config_data_copy, os.path.join(layerdir, recipe.filepath), recipe, layerdir_start, repodir)
+                    else:
+                        # Collect recipe data from scratch
 
-                    # First, check which recipes still exist
-                    layerrecipe_values = layerrecipes.values('id', 'filepath', 'filename', 'pn')
-                    layerrecipe_fns = []
-                    for v in layerrecipe_values:
-                        root = os.path.join(layerdir, v['filepath'])
-                        fullpath = os.path.join(root, v['filename'])
-                        if os.path.exists(fullpath):
-                            # Recipe still exists, update it
-                            results = layerrecipes.filter(id=v['id'])[:1]
-                            recipe = results[0]
-                            update_recipe_file(config_data_copy, root, recipe, layerdir_start, repodir)
-                        else:
-                            # Recipe no longer exists, mark it for later on
-                            layerrecipes_delete.append(v)
-                        layerrecipe_fns.append(fullpath)
+                        # First, check which recipes still exist
+                        layerrecipe_values = layerrecipes.values('id', 'filepath', 'filename', 'pn')
+                        layerrecipe_fns = []
+                        for v in layerrecipe_values:
+                            root = os.path.join(layerdir, v['filepath'])
+                            fullpath = os.path.join(root, v['filename'])
+                            if os.path.exists(fullpath):
+                                # Recipe still exists, update it
+                                results = layerrecipes.filter(id=v['id'])[:1]
+                                recipe = results[0]
+                                update_recipe_file(config_data_copy, root, recipe, layerdir_start, repodir)
+                            else:
+                                # Recipe no longer exists, mark it for later on
+                                layerrecipes_delete.append(v)
+                            layerrecipe_fns.append(fullpath)
 
-                    layermachines.delete()
-                    layerappends.delete()
-                    layerclasses.delete()
-                    for root, dirs, files in os.walk(layerdir):
-                        if '.git' in dirs:
-                            dirs.remove('.git')
-                        for f in files:
-                            fullpath = os.path.join(root, f)
-                            (typename, _, filename) = recipeparse.detect_file_type(fullpath, layerdir_start)
-                            if typename == 'recipe':
-                                if fullpath not in layerrecipe_fns:
-                                    layerrecipes_add.append(fullpath)
-                            elif typename == 'bbappend':
-                                append = BBAppend()
-                                append.layerbranch = layerbranch
-                                append.filename = f
-                                append.filepath = os.path.relpath(root, layerdir)
-                                append.save()
-                            elif typename == 'machine':
-                                machine = Machine()
-                                machine.layerbranch = layerbranch
-                                machine.name = filename
-                                update_machine_conf_file(fullpath, machine)
-                                machine.save()
-                            elif typename == 'bbclass':
-                                bbclass = BBClass()
-                                bbclass.layerbranch = layerbranch
-                                bbclass.name = filename
-                                bbclass.save()
+                        layermachines.delete()
+                        layerappends.delete()
+                        layerclasses.delete()
+                        for root, dirs, files in os.walk(layerdir):
+                            if '.git' in dirs:
+                                dirs.remove('.git')
+                            for f in files:
+                                fullpath = os.path.join(root, f)
+                                (typename, _, filename) = recipeparse.detect_file_type(fullpath, layerdir_start)
+                                if typename == 'recipe':
+                                    if fullpath not in layerrecipe_fns:
+                                        layerrecipes_add.append(fullpath)
+                                elif typename == 'bbappend':
+                                    append = BBAppend()
+                                    append.layerbranch = layerbranch
+                                    append.filename = f
+                                    append.filepath = os.path.relpath(root, layerdir)
+                                    append.save()
+                                elif typename == 'machine':
+                                    machine = Machine()
+                                    machine.layerbranch = layerbranch
+                                    machine.name = filename
+                                    update_machine_conf_file(fullpath, machine)
+                                    machine.save()
+                                elif typename == 'bbclass':
+                                    bbclass = BBClass()
+                                    bbclass.layerbranch = layerbranch
+                                    bbclass.name = filename
+                                    bbclass.save()
 
-                for added in layerrecipes_add:
-                    # This is good enough without actually parsing the file
-                    (pn, pv) = split_recipe_fn(added)
-                    oldid = -1
-                    for deleted in layerrecipes_delete:
-                        if deleted['pn'] == pn:
-                            oldid = deleted['id']
-                            layerrecipes_delete.remove(deleted)
-                            break
-                    if oldid > -1:
-                        # Reclaim a record we would have deleted
-                        results = Recipe.objects.filter(id=oldid)[:1]
-                        recipe = results[0]
-                        logger.debug("Reclaim %s for %s %s" % (recipe, pn, pv))
-                    else:
-                        # Create new record
-                        logger.debug("Add new recipe %s" % added)
-                        recipe = Recipe()
-                        recipe.layerbranch = layerbranch
-                        recipe.filename = os.path.basename(added)
-                        root = os.path.dirname(added)
-                        recipe.filepath = os.path.relpath(root, layerdir)
-                    update_recipe_file(config_data_copy, root, recipe, layerdir_start, repodir)
-                    recipe.save()
+                    for added in layerrecipes_add:
+                        # This is good enough without actually parsing the file
+                        (pn, pv) = split_recipe_fn(added)
+                        oldid = -1
+                        for deleted in layerrecipes_delete:
+                            if deleted['pn'] == pn:
+                                oldid = deleted['id']
+                                layerrecipes_delete.remove(deleted)
+                                break
+                        if oldid > -1:
+                            # Reclaim a record we would have deleted
+                            results = Recipe.objects.filter(id=oldid)[:1]
+                            recipe = results[0]
+                            logger.debug("Reclaim %s for %s %s" % (recipe, pn, pv))
+                        else:
+                            # Create new record
+                            logger.debug("Add new recipe %s" % added)
+                            recipe = Recipe()
+                            recipe.layerbranch = layerbranch
+                            recipe.filename = os.path.basename(added)
+                            root = os.path.dirname(added)
+                            recipe.filepath = os.path.relpath(root, layerdir)
+                        update_recipe_file(config_data_copy, root, recipe, layerdir_start, repodir)
+                        recipe.save()
 
-                for deleted in layerrecipes_delete:
-                    logger.debug("Delete %s" % deleted)
-                    results = Recipe.objects.filter(id=deleted['id'])[:1]
-                    recipe = results[0]
-                    recipe.delete()
+                    for deleted in layerrecipes_delete:
+                        logger.debug("Delete %s" % deleted)
+                        results = Recipe.objects.filter(id=deleted['id'])[:1]
+                        recipe = results[0]
+                        recipe.delete()
 
-                # Save repo info
-                layerbranch.vcs_last_rev = topcommit.hexsha
-                layerbranch.vcs_last_commit = datetime.fromtimestamp(topcommit.committed_date)
-            else:
-                logger.info("Layer %s is already up-to-date for branch %s" % (layer.name, branchdesc))
+                    # Save repo info
+                    layerbranch.vcs_last_rev = topcommit.hexsha
+                    layerbranch.vcs_last_commit = datetime.fromtimestamp(topcommit.committed_date)
+                else:
+                    logger.info("Layer %s is already up-to-date for branch %s" % (layer.name, branchdesc))
 
-            layerbranch.vcs_last_fetch = datetime.now()
-            layerbranch.save()
+                layerbranch.vcs_last_fetch = datetime.now()
+                layerbranch.save()
 
-            if options.dryrun:
-                transaction.rollback()
-            else:
-                transaction.commit()
-        except:
-            import traceback
-            traceback.print_exc()
-            transaction.rollback()
-        finally:
-            transaction.leave_transaction_management()
+                if options.dryrun:
+                    transaction.rollback()
+                else:
+                    transaction.commit()
+            except:
+                import traceback
+                traceback.print_exc()
+                transaction.rollback()
+            finally:
+                transaction.leave_transaction_management()
+    finally:
+        utils.unlock_file(lockfile)
 
     shutil.rmtree(tempdir)
     sys.exit(0)

layerindex/utils.py

@@ -9,6 +9,8 @@ import sys
 import os.path
 import subprocess
 import logging
+import time
+import fcntl
 
 def get_branch(branchname):
     from layerindex.models import Branch
@@ -63,3 +65,18 @@ def logger_create(name):
     logger.addHandler(loggerhandler)
     logger.setLevel(logging.INFO)
     return logger
+
+def lock_file(fn):
+    starttime = time.time()
+    while True:
+        lock = open(fn, 'w')
+        try:
+            fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
+            return lock
+        except IOError:
+            lock.close()
+            if time.time() - starttime > 30:
+                return None
+
+def unlock_file(lock):
+    fcntl.flock(lock, fcntl.LOCK_UN)
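
lock_file() retries a non-blocking flock() on the given path until it either
succeeds (returning the open file object) or roughly 30 seconds have elapsed
(returning None); unlock_file() drops the lock again. A minimal usage sketch
for any other script that needs to cooperate with the update/bulkchange
processes (the lock path below is illustrative, not from the diff; what
matters is that all participants agree on the same file):

    import utils

    lockfile = utils.lock_file('/srv/layers/layerindex.lock')  # illustrative path
    if not lockfile:
        raise SystemExit('layer index lock timeout expired')
    try:
        pass  # work on the shared checkouts/database here
    finally:
        utils.unlock_file(lockfile)

Because flock() locks belong to the open file descriptor rather than the file
itself, the kernel releases them automatically if the holding process exits
or crashes, so a leftover layerindex.lock file on disk is harmless.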