From: Mark Hymers
Date: Fri, 30 Oct 2009 22:26:30 +0000 (+0000)
Subject: Merge commit 'djpig/process-upload'
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=8c6875857c86b85f14e78da51e014845e538ef26;p=dak

Merge commit 'djpig/process-upload'

Also remove p-a and p-unchecked to avoid confusion

Conflicts:
        daklib/dbconn.py
        daklib/queue.py

Signed-off-by: Mark Hymers
---

8c6875857c86b85f14e78da51e014845e538ef26
diff --cc dak/dak.py
index e424836f,e424836f..47bbedfa
--- a/dak/dak.py
+++ b/dak/dak.py
@@@ -66,10 -66,10 +66,8 @@@ def init()
           ("process-new",
            "Process NEW and BYHAND packages"),
--         ("process-unchecked",
++         ("process-upload",
            "Process packages in queue/unchecked"),
--         ("process-accepted",
--          "Install packages into the pool"),
           ("make-suite-file-list",
            "Generate lists of packages per suite for apt-ftparchive"),
diff --cc dak/process_accepted.py
index b203f498,b203f498..00000000
deleted file mode 100755,100755
--- a/dak/process_accepted.py
+++ /dev/null
@@@ -1,706 -1,706 +1,0 @@@
--#!/usr/bin/env python
--
--"""
--Installs Debian packages from queue/accepted into the pool
--
--@contact: Debian FTP Master
--@copyright: 2000, 2001, 2002, 2003, 2004, 2006 James Troup
--@copyright: 2009 Joerg Jaspert
--@license: GNU General Public License version 2 or later
--
--"""
--# This program is free software; you can redistribute it and/or modify
--# it under the terms of the GNU General Public License as published by
--# the Free Software Foundation; either version 2 of the License, or
--# (at your option) any later version.
--
--# This program is distributed in the hope that it will be useful,
--# but WITHOUT ANY WARRANTY; without even the implied warranty of
--# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
--# GNU General Public License for more details.
--
--# You should have received a copy of the GNU General Public License
--# along with this program; if not, write to the Free Software
--# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
--
--###############################################################################
--
--# Cartman: "I'm trying to make the best of a bad situation, I don't
--#           need to hear crap from a bunch of hippy freaks living in
--#           denial. Screw you guys, I'm going home."
--#
--# Kyle: "But Cartman, we're trying to..."
--#
--# Cartman: "uhh.. screw you guys... home."
-- --############################################################################### -- --import errno --import fcntl --import os --import sys --from datetime import datetime --import apt_pkg -- --from daklib import daklog --from daklib.queue import * --from daklib import utils --from daklib.dbconn import * --from daklib.dak_exceptions import * --from daklib.regexes import re_default_answer, re_issource, re_fdnic --from daklib.urgencylog import UrgencyLog --from daklib.summarystats import SummaryStats --from daklib.config import Config -- --############################################################################### -- --Options = None --Logger = None -- --############################################################################### -- --def init(): -- global Options -- -- # Initialize config and connection to db -- cnf = Config() -- DBConn() -- -- Arguments = [('a',"automatic","Dinstall::Options::Automatic"), -- ('h',"help","Dinstall::Options::Help"), -- ('n',"no-action","Dinstall::Options::No-Action"), -- ('p',"no-lock", "Dinstall::Options::No-Lock"), -- ('s',"no-mail", "Dinstall::Options::No-Mail"), -- ('d',"directory", "Dinstall::Options::Directory", "HasArg")] -- -- for i in ["automatic", "help", "no-action", "no-lock", "no-mail", -- "version", "directory"]: -- if not cnf.has_key("Dinstall::Options::%s" % (i)): -- cnf["Dinstall::Options::%s" % (i)] = "" -- -- changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv) -- Options = cnf.SubTree("Dinstall::Options") -- -- if Options["Help"]: -- usage() -- -- # If we have a directory flag, use it to find our files -- if cnf["Dinstall::Options::Directory"] != "": -- # Note that we clobber the list of files we were given in this case -- # so warn if the user has done both -- if len(changes_files) > 0: -- utils.warn("Directory provided so ignoring files given on command line") -- -- changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"]) -- -- return changes_files -- --############################################################################### -- --def usage (exit_code=0): -- print """Usage: dak process-accepted [OPTION]... [CHANGES]... -- -a, --automatic automatic run -- -h, --help show this help and exit. -- -n, --no-action don't do anything -- -p, --no-lock don't check lockfile !! for cron.daily only !! -- -s, --no-mail don't send any mail -- -V, --version display the version number and exit""" -- sys.exit(exit_code) -- --############################################################################### -- --def action (u, stable_queue=None, log_urgency=True, session=None): -- (summary, short_summary) = u.build_summaries() -- pi = u.package_info() -- -- (prompt, answer) = ("", "XXX") -- if Options["No-Action"] or Options["Automatic"]: -- answer = 'S' -- -- if len(u.rejects) > 0: -- print "REJECT\n" + pi -- prompt = "[R]eject, Skip, Quit ?" -- if Options["Automatic"]: -- answer = 'R' -- else: -- print "INSTALL to " + ", ".join(u.pkg.changes["distribution"].keys()) -- print pi + summary, -- prompt = "[I]nstall, Skip, Quit ?" 
-- if Options["Automatic"]: -- answer = 'I' -- -- while prompt.find(answer) == -1: -- answer = utils.our_raw_input(prompt) -- m = re_default_answer.match(prompt) -- if answer == "": -- answer = m.group(1) -- answer = answer[:1].upper() -- -- if answer == 'R': -- u.do_unaccept() -- Logger.log(["unaccepted", u.pkg.changes_file]) -- elif answer == 'I': -- if stable_queue: -- stable_install(u, summary, short_summary, stable_queue, log_urgency) -- else: -- install(u, session, log_urgency) -- elif answer == 'Q': -- sys.exit(0) -- -- --############################################################################### --def add_poolfile(filename, datadict, location_id, session): -- poolfile = PoolFile() -- poolfile.filename = filename -- poolfile.filesize = datadict["size"] -- poolfile.md5sum = datadict["md5sum"] -- poolfile.sha1sum = datadict["sha1sum"] -- poolfile.sha256sum = datadict["sha256sum"] -- poolfile.location_id = location_id -- -- session.add(poolfile) -- # Flush to get a file id (NB: This is not a commit) -- session.flush() -- -- return poolfile -- --def add_dsc_to_db(u, filename, session): -- entry = u.pkg.files[filename] -- source = DBSource() -- -- source.source = u.pkg.dsc["source"] -- source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch -- source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id -- source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id -- source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id -- source.install_date = datetime.now().date() -- -- dsc_component = entry["component"] -- dsc_location_id = entry["location id"] -- -- source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes") -- -- # Set up a new poolfile if necessary -- if not entry.has_key("files id") or not entry["files id"]: -- filename = entry["pool name"] + filename -- poolfile = add_poolfile(filename, entry, dsc_location_id, session) -- entry["files id"] = poolfile.file_id -- -- source.poolfile_id = entry["files id"] -- session.add(source) -- session.flush() -- -- for suite_name in u.pkg.changes["distribution"].keys(): -- sa = SrcAssociation() -- sa.source_id = source.source_id -- sa.suite_id = get_suite(suite_name).suite_id -- session.add(sa) -- -- session.flush() -- -- # Add the source files to the DB (files and dsc_files) -- dscfile = DSCFile() -- dscfile.source_id = source.source_id -- dscfile.poolfile_id = entry["files id"] -- session.add(dscfile) -- -- for dsc_file, dentry in u.pkg.dsc_files.items(): -- df = DSCFile() -- df.source_id = source.source_id -- -- # If the .orig tarball is already in the pool, it's -- # files id is stored in dsc_files by check_dsc(). 
-- files_id = dentry.get("files id", None) -- -- # Find the entry in the files hash -- # TODO: Bail out here properly -- dfentry = None -- for f, e in u.pkg.files.items(): -- if f == dsc_file: -- dfentry = e -- break -- -- if files_id is None: -- filename = dfentry["pool name"] + dsc_file -- -- (found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id) -- # FIXME: needs to check for -1/-2 and or handle exception -- if found and obj is not None: -- files_id = obj.file_id -- -- # If still not found, add it -- if files_id is None: -- # HACK: Force sha1sum etc into dentry -- dentry["sha1sum"] = dfentry["sha1sum"] -- dentry["sha256sum"] = dfentry["sha256sum"] -- poolfile = add_poolfile(filename, dentry, dsc_location_id, session) -- files_id = poolfile.file_id -- -- df.poolfile_id = files_id -- session.add(df) -- -- session.flush() -- -- # Add the src_uploaders to the DB -- uploader_ids = [source.maintainer_id] -- if u.pkg.dsc.has_key("uploaders"): -- for up in u.pkg.dsc["uploaders"].split(","): -- up = up.strip() -- uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id) -- -- added_ids = {} -- for up in uploader_ids: -- if added_ids.has_key(up): -- utils.warn("Already saw uploader %s for source %s" % (up, source.source)) -- continue -- -- added_ids[u]=1 -- -- su = SrcUploader() -- su.maintainer_id = up -- su.source_id = source.source_id -- session.add(su) -- -- session.flush() -- -- return dsc_component, dsc_location_id -- --def add_deb_to_db(u, filename, session): -- """ -- Contrary to what you might expect, this routine deals with both -- debs and udebs. That info is in 'dbtype', whilst 'type' is -- 'deb' for both of them -- """ -- cnf = Config() -- entry = u.pkg.files[filename] -- -- bin = DBBinary() -- bin.package = entry["package"] -- bin.version = entry["version"] -- bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id -- bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id -- bin.arch_id = get_architecture(entry["architecture"], session).arch_id -- bin.binarytype = entry["dbtype"] -- -- # Find poolfile id -- filename = entry["pool name"] + filename -- fullpath = os.path.join(cnf["Dir::Pool"], filename) -- if not entry.get("location id", None): -- entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id -- -- if not entry.get("files id", None): -- poolfile = add_poolfile(filename, entry, entry["location id"], session) -- entry["files id"] = poolfile.file_id -- -- bin.poolfile_id = entry["files id"] -- -- # Find source id -- bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session) -- if len(bin_sources) != 1: -- raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \ -- (bin.package, bin.version, bin.architecture.arch_string, -- filename, bin.binarytype, u.pkg.changes["fingerprint"]) -- -- bin.source_id = bin_sources[0].source_id -- -- # Add and flush object so it has an ID -- session.add(bin) -- session.flush() -- -- # Add BinAssociations -- for suite_name in u.pkg.changes["distribution"].keys(): -- ba = BinAssociation() -- ba.binary_id = bin.binary_id -- ba.suite_id = get_suite(suite_name).suite_id -- session.add(ba) -- -- session.flush() -- -- # Deal with contents - disabled for now -- #contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), 
None, session) -- #if not contents: -- # print "REJECT\nCould not determine contents of package %s" % bin.package -- # session.rollback() -- # raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename) -- -- --def install(u, session, log_urgency=True): -- cnf = Config() -- summarystats = SummaryStats() -- -- print "Installing." -- -- Logger.log(["installing changes", u.pkg.changes_file]) -- -- # Ensure that we have all the hashes we need below. -- u.ensure_hashes() -- if len(u.rejects) > 0: -- # There were errors. Print them and SKIP the changes. -- for msg in u.rejects: -- utils.warn(msg) -- return -- -- # Add the .dsc file to the DB first -- for newfile, entry in u.pkg.files.items(): -- if entry["type"] == "dsc": -- dsc_component, dsc_location_id = add_dsc_to_db(u, newfile, session) -- -- # Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb) -- for newfile, entry in u.pkg.files.items(): -- if entry["type"] == "deb": -- add_deb_to_db(u, newfile, session) -- -- # If this is a sourceful diff only upload that is moving -- # cross-component we need to copy the .orig files into the new -- # component too for the same reasons as above. -- if u.pkg.changes["architecture"].has_key("source"): -- for orig_file in u.pkg.orig_files.keys(): -- if not u.pkg.orig_files[orig_file].has_key("id"): -- continue # Skip if it's not in the pool -- orig_file_id = u.pkg.orig_files[orig_file]["id"] -- if u.pkg.orig_files[orig_file]["location"] == dsc_location_id: -- continue # Skip if the location didn't change -- -- # Do the move -- oldf = get_poolfile_by_id(orig_file_id, session) -- old_filename = os.path.join(oldf.location.path, oldf.filename) -- old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum, -- 'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum} -- -- new_filename = os.path.join(utils.poolify(u.pkg.changes["source"], dsc_component), os.path.basename(old_filename)) -- -- # TODO: Care about size/md5sum collisions etc -- (found, newf) = check_poolfile(new_filename, file_size, file_md5sum, dsc_location_id, session) -- -- if newf is None: -- utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename)) -- newf = add_poolfile(new_filename, old_dat, dsc_location_id, session) -- -- # TODO: Check that there's only 1 here -- source = get_sources_from_name(u.pkg.changes["source"], u.pkg.changes["version"])[0] -- dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0] -- dscf.poolfile_id = newf.file_id -- session.add(dscf) -- session.flush() -- -- # Install the files into the pool -- for newfile, entry in u.pkg.files.items(): -- destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile) -- utils.move(newfile, destination) -- Logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]]) -- summarystats.accept_bytes += float(entry["size"]) -- -- # Copy the .changes file across for suite which need it. -- copy_changes = {} -- copy_dot_dak = {} -- for suite_name in u.pkg.changes["distribution"].keys(): -- if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)): -- copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = "" -- # and the .dak file... 
-- if cnf.has_key("Suite::%s::CopyDotDak" % (suite_name)): -- copy_dot_dak[cnf["Suite::%s::CopyDotDak" % (suite_name)]] = "" -- -- for dest in copy_changes.keys(): -- utils.copy(u.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest)) -- -- for dest in copy_dot_dak.keys(): -- utils.copy(u.pkg.changes_file[:-8]+".dak", dest) -- -- # We're done - commit the database changes -- session.commit() -- -- # Move the .changes into the 'done' directory -- utils.move(u.pkg.changes_file, -- os.path.join(cnf["Dir::Queue::Done"], os.path.basename(u.pkg.changes_file))) -- -- # Remove the .dak file -- os.unlink(u.pkg.changes_file[:-8] + ".dak") -- -- if u.pkg.changes["architecture"].has_key("source") and log_urgency: -- UrgencyLog().log(u.pkg.dsc["source"], u.pkg.dsc["version"], u.pkg.changes["urgency"]) -- -- # Our SQL session will automatically start a new transaction after -- # the last commit -- -- # Undo the work done in queue.py(accept) to help auto-building -- # from accepted. -- now_date = datetime.now() -- -- for suite_name in u.pkg.changes["distribution"].keys(): -- if suite_name not in cnf.ValueList("Dinstall::QueueBuildSuites"): -- continue -- -- suite = get_suite(suite_name, session) -- dest_dir = cnf["Dir::QueueBuild"] -- -- if cnf.FindB("Dinstall::SecurityQueueBuild"): -- dest_dir = os.path.join(dest_dir, suite_name) -- -- for newfile, entry in u.pkg.files.items(): -- dest = os.path.join(dest_dir, newfile) -- -- qb = get_queue_build(dest, suite.suite_id, session) -- -- # Remove it from the list of packages for later processing by apt-ftparchive -- if qb: -- qb.last_used = now_date -- qb.in_queue = False -- session.add(qb) -- -- if not cnf.FindB("Dinstall::SecurityQueueBuild"): -- # Update the symlink to point to the new location in the pool -- pool_location = utils.poolify(u.pkg.changes["source"], entry["component"]) -- src = os.path.join(cnf["Dir::Pool"], pool_location, os.path.basename(newfile)) -- if os.path.islink(dest): -- os.unlink(dest) -- os.symlink(src, dest) -- -- # Update last_used on any non-uploaded .orig symlink -- for orig_file in u.pkg.orig_files.keys(): -- # Determine the .orig.tar.gz file name -- if not u.pkg.orig_files[orig_file].has_key("id"): -- continue # Skip files not in the pool -- # XXX: do we really want to update the orig_files dict here -- # instead of using a temporary variable? -- u.pkg.orig_files[orig_file]["path"] = os.path.join(dest_dir, orig_file) -- -- # Remove it from the list of packages for later processing by apt-ftparchive -- qb = get_queue_build(u.pkg.orig_files[orig_file]["path"], suite.suite_id, session) -- if qb: -- qb.in_queue = False -- qb.last_used = now_date -- session.add(qb) -- -- session.commit() -- -- # Finally... -- summarystats.accept_count += 1 -- --################################################################################ -- --def stable_install(u, session, summary, short_summary, fromsuite_name="proposed-updates"): -- summarystats = SummaryStats() -- -- fromsuite_name = fromsuite_name.lower() -- tosuite_name = "Stable" -- if fromsuite_name == "oldstable-proposed-updates": -- tosuite_name = "OldStable" -- -- print "Installing from %s to %s." 
% (fromsuite_name, tosuite_name) -- -- fromsuite = get_suite(fromsuite_name) -- tosuite = get_suite(tosuite_name) -- -- # Add the source to stable (and remove it from proposed-updates) -- for newfile, entry in u.pkg.files.items(): -- if entry["type"] == "dsc": -- package = u.pkg.dsc["source"] -- # NB: not files[file]["version"], that has no epoch -- version = u.pkg.dsc["version"] -- -- source = get_sources_from_name(package, version, session) -- if len(source) < 1: -- utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s) in source table." % (package, version)) -- source = source[0] -- -- # Remove from old suite -- old = session.query(SrcAssociation).filter_by(source_id = source.source_id) -- old = old.filter_by(suite_id = fromsuite.suite_id) -- old.delete() -- -- # Add to new suite -- new = SrcAssociation() -- new.source_id = source.source_id -- new.suite_id = tosuite.suite_id -- session.add(new) -- -- # Add the binaries to stable (and remove it/them from proposed-updates) -- for newfile, entry in u.pkg.files.items(): -- if entry["type"] == "deb": -- package = entry["package"] -- version = entry["version"] -- architecture = entry["architecture"] -- -- binary = get_binaries_from_name(package, version, [architecture, 'all']) -- -- if len(binary) < 1: -- utils.fubar("[INTERNAL ERROR] couldn't find '%s' (%s for %s architecture) in binaries table." % (package, version, architecture)) -- binary = binary[0] -- -- # Remove from old suite -- old = session.query(BinAssociation).filter_by(binary_id = binary.binary_id) -- old = old.filter_by(suite_id = fromsuite.suite_id) -- old.delete() -- -- # Add to new suite -- new = BinAssociation() -- new.binary_id = binary.binary_id -- new.suite_id = tosuite.suite_id -- session.add(new) -- -- session.commit() -- -- utils.move(u.pkg.changes_file, -- os.path.join(cnf["Dir::Morgue"], 'process-accepted', os.path.basename(u.pkg.changes_file))) -- -- ## Update the Stable ChangeLog file -- # TODO: URGH - Use a proper tmp file -- new_changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + ".ChangeLog" -- changelog_filename = cnf["Dir::Root"] + cnf["Suite::%s::ChangeLogBase" % (tosuite.suite_name)] + "ChangeLog" -- if os.path.exists(new_changelog_filename): -- os.unlink(new_changelog_filename) -- -- new_changelog = utils.open_file(new_changelog_filename, 'w') -- for newfile, entry in u.pkg.files.items(): -- if entry["type"] == "deb": -- new_changelog.write("%s/%s/binary-%s/%s\n" % (tosuite.suite_name, -- entry["component"], -- entry["architecture"], -- newfile)) -- elif re_issource.match(newfile): -- new_changelog.write("%s/%s/source/%s\n" % (tosuite.suite_name, -- entry["component"], -- newfile)) -- else: -- new_changelog.write("%s\n" % (newfile)) -- -- chop_changes = re_fdnic.sub("\n", u.pkg.changes["changes"]) -- new_changelog.write(chop_changes + '\n\n') -- -- if os.access(changelog_filename, os.R_OK) != 0: -- changelog = utils.open_file(changelog_filename) -- new_changelog.write(changelog.read()) -- -- new_changelog.close() -- -- if os.access(changelog_filename, os.R_OK) != 0: -- os.unlink(changelog_filename) -- utils.move(new_changelog_filename, changelog_filename) -- -- summarystats.accept_count += 1 -- -- if not Options["No-Mail"] and u.pkg.changes["architecture"].has_key("source"): -- u.Subst["__SUITE__"] = " into %s" % (tosuite) -- u.Subst["__SUMMARY__"] = summary -- u.Subst["__BCC__"] = "X-DAK: dak process-accepted" -- -- if cnf.has_key("Dinstall::Bcc"): -- u.Subst["__BCC__"] += "\nBcc: %s" % (cnf["Dinstall::Bcc"]) -- 
-- template = os.path.join(cnf["Dir::Templates"], 'process-accepted.install') -- -- mail_message = utils.TemplateSubst(u.Subst, template) -- utils.send_mail(mail_message) -- u.announce(short_summary, True) -- -- # Finally remove the .dak file -- dot_dak_file = os.path.join(cnf["Suite::%s::CopyDotDak" % (fromsuite.suite_name)], -- os.path.basename(u.pkg.changes_file[:-8]+".dak")) -- os.unlink(dot_dak_file) -- --################################################################################ -- --def process_it(changes_file, stable_queue, log_urgency, session): -- cnf = Config() -- u = Upload() -- -- overwrite_checks = True -- -- # Absolutize the filename to avoid the requirement of being in the -- # same directory as the .changes file. -- cfile = os.path.abspath(changes_file) -- -- # And since handling of installs to stable munges with the CWD -- # save and restore it. -- u.prevdir = os.getcwd() -- -- if stable_queue: -- old = cfile -- cfile = os.path.basename(old) -- os.chdir(cnf["Suite::%s::CopyDotDak" % (stable_queue)]) -- # overwrite_checks should not be performed if installing to stable -- overwrite_checks = False -- -- u.pkg.load_dot_dak(cfile) -- u.update_subst() -- -- if stable_queue: -- u.pkg.changes_file = old -- -- u.accepted_checks(overwrite_checks, session) -- action(u, stable_queue, log_urgency, session) -- -- # Restore CWD -- os.chdir(u.prevdir) -- --############################################################################### -- --def main(): -- global Logger -- -- cnf = Config() -- summarystats = SummaryStats() -- changes_files = init() -- log_urgency = False -- stable_queue = None -- -- # -n/--dry-run invalidates some other options which would involve things happening -- if Options["No-Action"]: -- Options["Automatic"] = "" -- -- # Check that we aren't going to clash with the daily cron job -- -- if not Options["No-Action"] and os.path.exists("%s/Archive_Maintenance_In_Progress" % (cnf["Dir::Root"])) and not Options["No-Lock"]: -- utils.fubar("Archive maintenance in progress. 
Try again later.") -- -- # If running from within proposed-updates; assume an install to stable -- queue = "" -- if os.getenv('PWD').find('oldstable-proposed-updates') != -1: -- stable_queue = "Oldstable-Proposed-Updates" -- elif os.getenv('PWD').find('proposed-updates') != -1: -- stable_queue = "Proposed-Updates" -- -- # Obtain lock if not in no-action mode and initialize the log -- if not Options["No-Action"]: -- lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT) -- try: -- fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) -- except IOError, e: -- if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN': -- utils.fubar("Couldn't obtain lock; assuming another 'dak process-accepted' is already running.") -- else: -- raise -- Logger = daklog.Logger(cnf, "process-accepted") -- if not stable_queue and cnf.get("Dir::UrgencyLog"): -- # Initialise UrgencyLog() -- log_urgency = True -- UrgencyLog() -- -- # Sort the .changes files so that we process sourceful ones first -- changes_files.sort(utils.changes_compare) -- -- -- # Process the changes files -- for changes_file in changes_files: -- print "\n" + changes_file -- session = DBConn().session() -- process_it(changes_file, stable_queue, log_urgency, session) -- session.close() -- -- if summarystats.accept_count: -- sets = "set" -- if summarystats.accept_count > 1: -- sets = "sets" -- sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets, -- utils.size_type(int(summarystats.accept_bytes)))) -- Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes]) -- -- if not Options["No-Action"]: -- Logger.close() -- if log_urgency: -- UrgencyLog().close() -- --############################################################################### -- --if __name__ == '__main__': -- main() diff --cc dak/process_unchecked.py index 8a3e49d1,8a3e49d1..00000000 deleted file mode 100755,100755 --- a/dak/process_unchecked.py +++ /dev/null @@@ -1,593 -1,593 +1,0 @@@ --#!/usr/bin/env python -- --""" --Checks Debian packages from Incoming --@contact: Debian FTP Master --@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup --@copyright: 2009 Joerg Jaspert --@copyright: 2009 Mark Hymers --@license: GNU General Public License version 2 or later --""" -- --# This program is free software; you can redistribute it and/or modify --# it under the terms of the GNU General Public License as published by --# the Free Software Foundation; either version 2 of the License, or --# (at your option) any later version. -- --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. -- --# You should have received a copy of the GNU General Public License --# along with this program; if not, write to the Free Software --# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -- --# Originally based on dinstall by Guy Maor -- --################################################################################ -- --# Computer games don't affect kids. I mean if Pacman affected our generation as --# kids, we'd all run around in a darkened room munching pills and listening to --# repetitive music. 
--# -- Unknown -- --################################################################################ -- --import errno --import fcntl --import os --import sys --import traceback --import apt_pkg -- --from daklib.dbconn import * --from daklib import daklog --from daklib.queue import * --from daklib import utils --from daklib.textutils import fix_maintainer --from daklib.dak_exceptions import * --from daklib.regexes import re_default_answer --from daklib.summarystats import SummaryStats --from daklib.holding import Holding --from daklib.config import Config -- --from types import * -- --################################################################################ -- -- --################################################################################ -- --# Globals --Options = None --Logger = None -- --############################################################################### -- --def init(): -- global Options -- -- apt_pkg.init() -- cnf = Config() -- -- Arguments = [('a',"automatic","Dinstall::Options::Automatic"), -- ('h',"help","Dinstall::Options::Help"), -- ('n',"no-action","Dinstall::Options::No-Action"), -- ('p',"no-lock", "Dinstall::Options::No-Lock"), -- ('s',"no-mail", "Dinstall::Options::No-Mail"), -- ('d',"directory", "Dinstall::Options::Directory", "HasArg")] -- -- for i in ["automatic", "help", "no-action", "no-lock", "no-mail", -- "override-distribution", "version", "directory"]: -- cnf["Dinstall::Options::%s" % (i)] = "" -- -- changes_files = apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv) -- Options = cnf.SubTree("Dinstall::Options") -- -- if Options["Help"]: -- usage() -- -- # If we have a directory flag, use it to find our files -- if cnf["Dinstall::Options::Directory"] != "": -- # Note that we clobber the list of files we were given in this case -- # so warn if the user has done both -- if len(changes_files) > 0: -- utils.warn("Directory provided so ignoring files given on command line") -- -- changes_files = utils.get_changes_files(cnf["Dinstall::Options::Directory"]) -- -- return changes_files -- --################################################################################ -- --def usage (exit_code=0): -- print """Usage: dak process-unchecked [OPTION]... [CHANGES]... -- -a, --automatic automatic run -- -h, --help show this help and exit. -- -n, --no-action don't do anything -- -p, --no-lock don't check lockfile !! for cron.daily only !! -- -s, --no-mail don't send any mail -- -V, --version display the version number and exit""" -- sys.exit(exit_code) -- --################################################################################ -- --def action(u): -- cnf = Config() -- -- # changes["distribution"] may not exist in corner cases -- # (e.g. 
unreadable changes files) -- if not u.pkg.changes.has_key("distribution") or not isinstance(u.pkg.changes["distribution"], DictType): -- u.pkg.changes["distribution"] = {} -- -- (summary, short_summary) = u.build_summaries() -- -- # q-unapproved hax0ring -- queue_info = { -- "New": { "is": is_new, "process": acknowledge_new }, -- "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand }, -- "Byhand" : { "is": is_byhand, "process": do_byhand }, -- "OldStableUpdate" : { "is": is_oldstableupdate, -- "process": do_oldstableupdate }, -- "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate }, -- "Unembargo" : { "is": is_unembargo, "process": queue_unembargo }, -- "Embargo" : { "is": is_embargo, "process": queue_embargo }, -- } -- -- queues = [ "New", "Autobyhand", "Byhand" ] -- if cnf.FindB("Dinstall::SecurityQueueHandling"): -- queues += [ "Unembargo", "Embargo" ] -- else: -- queues += [ "OldStableUpdate", "StableUpdate" ] -- -- (prompt, answer) = ("", "XXX") -- if Options["No-Action"] or Options["Automatic"]: -- answer = 'S' -- -- queuekey = '' -- -- pi = u.package_info() -- -- if len(u.rejects) > 0: -- if u.upload_too_new(): -- print "SKIP (too new)\n" + pi, -- prompt = "[S]kip, Quit ?" -- else: -- print "REJECT\n" + pi -- prompt = "[R]eject, Skip, Quit ?" -- if Options["Automatic"]: -- answer = 'R' -- else: -- qu = None -- for q in queues: -- if queue_info[q]["is"](u): -- qu = q -- break -- if qu: -- print "%s for %s\n%s%s" % ( qu.upper(), ", ".join(u.pkg.changes["distribution"].keys()), pi, summary) -- queuekey = qu[0].upper() -- if queuekey in "RQSA": -- queuekey = "D" -- prompt = "[D]ivert, Skip, Quit ?" -- else: -- prompt = "[%s]%s, Skip, Quit ?" % (queuekey, qu[1:].lower()) -- if Options["Automatic"]: -- answer = queuekey -- else: -- print "ACCEPT\n" + pi + summary, -- prompt = "[A]ccept, Skip, Quit ?" 
-- if Options["Automatic"]: -- answer = 'A' -- -- while prompt.find(answer) == -1: -- answer = utils.our_raw_input(prompt) -- m = re_default_answer.match(prompt) -- if answer == "": -- answer = m.group(1) -- answer = answer[:1].upper() -- -- if answer == 'R': -- os.chdir(u.pkg.directory) -- u.do_reject(0, pi) -- elif answer == 'A': -- u.pkg.add_known_changes( "Accepted" ) -- u.accept(summary, short_summary) -- u.check_override() -- u.remove() -- elif answer == queuekey: -- u.pkg.add_known_changes( qu ) -- queue_info[qu]["process"](u, summary, short_summary) -- u.remove() -- elif answer == 'Q': -- sys.exit(0) -- --################################################################################ -- --def package_to_suite(u, suite): -- if not u.pkg.changes["distribution"].has_key(suite): -- return False -- -- ret = True -- -- if not u.pkg.changes["architecture"].has_key("source"): -- s = DBConn().session() -- q = s.query(SrcAssociation.sa_id) -- q = q.join(Suite).filter_by(suite_name=suite) -- q = q.join(DBSource).filter_by(source=u.pkg.changes['source']) -- q = q.filter_by(version=u.pkg.changes['version']).limit(1) -- -- # NB: Careful, this logic isn't what you would think it is -- # Source is already in {old-,}proposed-updates so no need to hold -- # Instead, we don't move to the holding area, we just do an ACCEPT -- if q.count() > 0: -- ret = False -- -- s.close() -- -- return ret -- --def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None): -- cnf = Config() -- dir = cnf["Dir::Queue::%s" % queue] -- -- print "Moving to %s holding area" % queue.upper() -- Logger.log(["Moving to %s" % queue, u.pkg.changes_file]) -- -- u.pkg.write_dot_dak(dir) -- u.move_to_dir(dir, perms=perms) -- if build: -- get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir) -- -- # Check for override disparities -- u.check_override() -- -- # Send accept mail, announce to lists and close bugs -- if announce and not cnf["Dinstall::Options::No-Mail"]: -- template = os.path.join(cnf["Dir::Templates"], announce) -- u.update_subst() -- u.Subst["__SUITE__"] = "" -- mail_message = utils.TemplateSubst(u.Subst, template) -- utils.send_mail(mail_message) -- u.announce(short_summary, True) -- --################################################################################ -- --def is_unembargo(u): -- session = DBConn().session() -- cnf = Config() -- -- q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes) -- if q.rowcount > 0: -- session.close() -- return True -- -- oldcwd = os.getcwd() -- os.chdir(cnf["Dir::Queue::Disembargo"]) -- disdir = os.getcwd() -- os.chdir(oldcwd) -- -- ret = False -- -- if u.pkg.directory == disdir: -- if u.pkg.changes["architecture"].has_key("source"): -- if not Options["No-Action"]: -- session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes) -- session.commit() -- -- ret = True -- -- session.close() -- -- return ret -- --def queue_unembargo(u, summary, short_summary): -- return package_to_queue(u, summary, short_summary, "Unembargoed", -- perms=0660, build=True, announce='process-unchecked.accepted') -- --################################################################################ -- --def is_embargo(u): -- # if embargoed queues are enabled always embargo -- return True -- --def queue_embargo(u, summary, short_summary): -- return package_to_queue(u, summary, short_summary, "Unembargoed", -- perms=0660, build=True, 
announce='process-unchecked.accepted') -- --################################################################################ -- --def is_stableupdate(u): -- return package_to_suite(u, 'proposed-updates') -- --def do_stableupdate(u, summary, short_summary): -- return package_to_queue(u, summary, short_summary, "ProposedUpdates", -- perms=0664, build=False, announce=None) -- --################################################################################ -- --def is_oldstableupdate(u): -- return package_to_suite(u, 'oldstable-proposed-updates') -- --def do_oldstableupdate(u, summary, short_summary): -- return package_to_queue(u, summary, short_summary, "OldProposedUpdates", -- perms=0664, build=False, announce=None) -- --################################################################################ -- --def is_autobyhand(u): -- cnf = Config() -- -- all_auto = 1 -- any_auto = 0 -- for f in u.pkg.files.keys(): -- if u.pkg.files[f].has_key("byhand"): -- any_auto = 1 -- -- # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH -- # don't contain underscores, and ARCH doesn't contain dots. -- # further VER matches the .changes Version:, and ARCH should be in -- # the .changes Architecture: list. -- if f.count("_") < 2: -- all_auto = 0 -- continue -- -- (pckg, ver, archext) = f.split("_", 2) -- if archext.count(".") < 1 or u.pkg.changes["version"] != ver: -- all_auto = 0 -- continue -- -- ABH = cnf.SubTree("AutomaticByHandPackages") -- if not ABH.has_key(pckg) or \ -- ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]: -- print "not match %s %s" % (pckg, u.pkg.changes["source"]) -- all_auto = 0 -- continue -- -- (arch, ext) = archext.split(".", 1) -- if arch not in u.pkg.changes["architecture"]: -- all_auto = 0 -- continue -- -- u.pkg.files[f]["byhand-arch"] = arch -- u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)] -- -- return any_auto and all_auto -- --def do_autobyhand(u, summary, short_summary): -- print "Attempting AUTOBYHAND." -- byhandleft = True -- for f, entry in u.pkg.files.items(): -- byhandfile = f -- -- if not entry.has_key("byhand"): -- continue -- -- if not entry.has_key("byhand-script"): -- byhandleft = True -- continue -- -- os.system("ls -l %s" % byhandfile) -- -- result = os.system("%s %s %s %s %s" % ( -- entry["byhand-script"], -- byhandfile, -- u.pkg.changes["version"], -- entry["byhand-arch"], -- os.path.abspath(u.pkg.changes_file))) -- -- if result == 0: -- os.unlink(byhandfile) -- del entry -- else: -- print "Error processing %s, left as byhand." % (f) -- byhandleft = True -- -- if byhandleft: -- do_byhand(u, summary, short_summary) -- else: -- u.accept(summary, short_summary) -- u.check_override() -- # XXX: We seem to be missing a u.remove() here -- # This might explain why we get byhand leftovers in unchecked - mhy -- --################################################################################ -- --def is_byhand(u): -- for f in u.pkg.files.keys(): -- if u.pkg.files[f].has_key("byhand"): -- return True -- return False -- --def do_byhand(u, summary, short_summary): -- return package_to_queue(u, summary, short_summary, "Byhand", -- perms=0660, build=False, announce=None) -- --################################################################################ -- --def is_new(u): -- for f in u.pkg.files.keys(): -- if u.pkg.files[f].has_key("new"): -- return True -- return False -- --def acknowledge_new(u, summary, short_summary): -- cnf = Config() -- -- print "Moving to NEW holding area." 
-- Logger.log(["Moving to new", u.pkg.changes_file]) -- -- u.pkg.write_dot_dak(cnf["Dir::Queue::New"]) -- u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644) -- -- if not Options["No-Mail"]: -- print "Sending new ack." -- template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new') -- u.update_subst() -- u.Subst["__SUMMARY__"] = summary -- new_ack_message = utils.TemplateSubst(u.Subst, template) -- utils.send_mail(new_ack_message) -- --################################################################################ -- --# reprocess is necessary for the case of foo_1.2-1 and foo_1.2-2 in --# Incoming. -1 will reference the .orig.tar.gz, but -2 will not. --# Upload.check_dsc_against_db() can find the .orig.tar.gz but it will --# not have processed it during it's checks of -2. If -1 has been --# deleted or otherwise not checked by 'dak process-unchecked', the --# .orig.tar.gz will not have been checked at all. To get round this, --# we force the .orig.tar.gz into the .changes structure and reprocess --# the .changes file. -- --def process_it(changes_file): -- global Logger -- -- cnf = Config() -- -- holding = Holding() -- -- u = Upload() -- u.pkg.changes_file = changes_file -- u.pkg.directory = os.getcwd() -- u.logger = Logger -- origchanges = os.path.join(u.pkg.directory, u.pkg.changes_file) -- -- # Some defaults in case we can't fully process the .changes file -- u.pkg.changes["maintainer2047"] = cnf["Dinstall::MyEmailAddress"] -- u.pkg.changes["changedby2047"] = cnf["Dinstall::MyEmailAddress"] -- -- # debian-{devel-,}-changes@lists.debian.org toggles writes access based on this header -- bcc = "X-DAK: dak process-unchecked" -- if cnf.has_key("Dinstall::Bcc"): -- u.Subst["__BCC__"] = bcc + "\nBcc: %s" % (cnf["Dinstall::Bcc"]) -- else: -- u.Subst["__BCC__"] = bcc -- -- # Remember where we are so we can come back after cd-ing into the -- # holding directory. TODO: Fix this stupid hack -- u.prevdir = os.getcwd() -- -- # TODO: Figure out something better for this (or whether it's even -- # necessary - it seems to have been for use when we were -- # still doing the is_unchecked check; reprocess = 2) -- u.reprocess = 1 -- -- try: -- # If this is the Real Thing(tm), copy things into a private -- # holding directory first to avoid replacable file races. -- if not Options["No-Action"]: -- os.chdir(cnf["Dir::Queue::Holding"]) -- -- # Absolutize the filename to avoid the requirement of being in the -- # same directory as the .changes file. -- holding.copy_to_holding(origchanges) -- -- # Relativize the filename so we use the copy in holding -- # rather than the original... 
-- changespath = os.path.basename(u.pkg.changes_file) -- -- (u.pkg.changes["fingerprint"], rejects) = utils.check_signature(changespath) -- -- if u.pkg.changes["fingerprint"]: -- valid_changes_p = u.load_changes(changespath) -- else: -- valid_changes_p = False -- u.rejects.extend(rejects) -- -- if valid_changes_p: -- while u.reprocess: -- u.check_distributions() -- u.check_files(not Options["No-Action"]) -- valid_dsc_p = u.check_dsc(not Options["No-Action"]) -- if valid_dsc_p and not Options["No-Action"]: -- u.check_source() -- u.check_lintian() -- u.check_hashes() -- u.check_urgency() -- u.check_timestamps() -- u.check_signed_by_key() -- -- action(u) -- -- except (SystemExit, KeyboardInterrupt): -- raise -- -- except: -- print "ERROR" -- traceback.print_exc(file=sys.stderr) -- -- # Restore previous WD -- os.chdir(u.prevdir) -- --############################################################################### -- --def main(): -- global Options, Logger -- -- cnf = Config() -- changes_files = init() -- -- # -n/--dry-run invalidates some other options which would involve things happening -- if Options["No-Action"]: -- Options["Automatic"] = "" -- -- # Initialize our Holding singleton -- holding = Holding() -- -- # Ensure all the arguments we were given are .changes files -- for f in changes_files: -- if not f.endswith(".changes"): -- utils.warn("Ignoring '%s' because it's not a .changes file." % (f)) -- changes_files.remove(f) -- -- if changes_files == []: -- if cnf["Dinstall::Options::Directory"] == "": -- utils.fubar("Need at least one .changes file as an argument.") -- else: -- sys.exit(0) -- -- # Check that we aren't going to clash with the daily cron job -- if not Options["No-Action"] and os.path.exists("%s/daily.lock" % (cnf["Dir::Lock"])) and not Options["No-Lock"]: -- utils.fubar("Archive maintenance in progress. Try again later.") -- -- # Obtain lock if not in no-action mode and initialize the log -- if not Options["No-Action"]: -- lock_fd = os.open(cnf["Dinstall::LockFile"], os.O_RDWR | os.O_CREAT) -- try: -- fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) -- except IOError, e: -- if errno.errorcode[e.errno] == 'EACCES' or errno.errorcode[e.errno] == 'EAGAIN': -- utils.fubar("Couldn't obtain lock; assuming another 'dak process-unchecked' is already running.") -- else: -- raise -- Logger = daklog.Logger(cnf, "process-unchecked") -- -- # Sort the .changes files so that we process sourceful ones first -- changes_files.sort(utils.changes_compare) -- -- # Process the changes files -- for changes_file in changes_files: -- print "\n" + changes_file -- try: -- process_it (changes_file) -- finally: -- if not Options["No-Action"]: -- holding.clean() -- -- accept_count = SummaryStats().accept_count -- accept_bytes = SummaryStats().accept_bytes -- -- if accept_count: -- sets = "set" -- if accept_count > 1: -- sets = "sets" -- print "Accepted %d package %s, %s." 
% (accept_count, sets, utils.size_type(int(accept_bytes))) -- Logger.log(["total",accept_count,accept_bytes]) -- -- if not Options["No-Action"]: -- Logger.close() -- --################################################################################ -- --if __name__ == '__main__': -- main() diff --cc daklib/queue.py index 1694deb4,44f63263..effbb4e0 --- a/daklib/queue.py +++ b/daklib/queue.py @@@ -1923,13 -1962,16 +1962,17 @@@ distribution."" os.rename(temp_filename, filename) os.chmod(filename, 0644) - # auto-build queue -# res = get_or_set_queue('buildd', session).autobuild_upload(self.pkg, session) -# if res: -# utils.fubar(res) -# now_date = datetime.now() + # This routine returns None on success or an error on failure + # TODO: Replace queue copying using the new queue.add_file_from_pool routine + # and by looking up which queues in suite.copy_queues + #res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"]) + #if res: + # utils.fubar(res) + session.commit() + + # Finally... + stats.accept_count += 1 def check_override(self): """ @@@ -1968,15 -2010,21 +2011,21 @@@ def remove(self, from_dir=None): """ Used (for instance) in p-u to remove the package from unchecked + + Also removes the package from holding area. """ if from_dir is None: - os.chdir(self.pkg.directory) - else: - os.chdir(from_dir) + from_dir = self.pkg.directory + h = Holding() for f in self.pkg.files.keys(): - os.unlink(f) - os.unlink(self.pkg.changes_file) + os.unlink(os.path.join(from_dir, f)) + if os.path.exists(os.path.join(h.holding_dir, f)): + os.unlink(os.path.join(h.holding_dir, f)) - ++ + os.unlink(os.path.join(from_dir, self.pkg.changes_file)) + if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)): + os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file)) ########################################################################### diff --cc daklib/queue_install.py index 00000000,9b78b264..c8fa39e0 mode 000000,100644..100644 --- a/daklib/queue_install.py +++ b/daklib/queue_install.py @@@ -1,0 -1,286 +1,286 @@@ + #!/usr/bin/env python + # vim:set et sw=4: + + """ + Utility functions for process-upload + + @contact: Debian FTP Master + @copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup + @copyright: 2009 Joerg Jaspert + @copyright: 2009 Mark Hymers + @license: GNU General Public License version 2 or later + """ + + # This program is free software; you can redistribute it and/or modify + # it under the terms of the GNU General Public License as published by + # the Free Software Foundation; either version 2 of the License, or + # (at your option) any later version. + + # This program is distributed in the hope that it will be useful, + # but WITHOUT ANY WARRANTY; without even the implied warranty of + # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + # GNU General Public License for more details. 
+ + # You should have received a copy of the GNU General Public License + # along with this program; if not, write to the Free Software + # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + import os + + from daklib import utils + from daklib.dbconn import * + from daklib.config import Config + + ############################################################################### + + def determine_target(u): + cnf = Config() - ++ + queues = [ "New", "Autobyhand", "Byhand" ] + if cnf.FindB("Dinstall::SecurityQueueHandling"): + queues += [ "Unembargo", "Embargo" ] + else: + queues += [ "OldStableUpdate", "StableUpdate" ] + + target = None + for q in queues: + if QueueInfo[q]["is"](u): + target = q + break + + return target + + ################################################################################ + + def package_to_suite(u, suite): + if not u.pkg.changes["distribution"].has_key(suite): + return False + + ret = True + + if not u.pkg.changes["architecture"].has_key("source"): + s = DBConn().session() + q = s.query(SrcAssociation.sa_id) + q = q.join(Suite).filter_by(suite_name=suite) + q = q.join(DBSource).filter_by(source=u.pkg.changes['source']) + q = q.filter_by(version=u.pkg.changes['version']).limit(1) + + # NB: Careful, this logic isn't what you would think it is + # Source is already in {old-,}proposed-updates so no need to hold + # Instead, we don't move to the holding area, we just do an ACCEPT + if q.count() > 0: + ret = False + + s.close() + + return ret + + def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None): + cnf = Config() + dir = cnf["Dir::Queue::%s" % queue] + + print "Moving to %s holding area" % queue.upper() + u.logger.log(["Moving to %s" % queue, u.pkg.changes_file]) + + u.move_to_dir(dir, perms=perms) + if build: + get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir) + + # Check for override disparities + u.check_override() + + # Send accept mail, announce to lists and close bugs + if announce and not cnf["Dinstall::Options::No-Mail"]: + template = os.path.join(cnf["Dir::Templates"], announce) + u.update_subst() + u.Subst["__SUITE__"] = "" + mail_message = utils.TemplateSubst(u.Subst, template) + utils.send_mail(mail_message) + u.announce(short_summary, True) + + ################################################################################ + + def is_unembargo(u): + session = DBConn().session() + cnf = Config() + + q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes) + if q.rowcount > 0: + session.close() + return True + + oldcwd = os.getcwd() + os.chdir(cnf["Dir::Queue::Disembargo"]) + disdir = os.getcwd() + os.chdir(oldcwd) + + ret = False + + if u.pkg.directory == disdir: + if u.pkg.changes["architecture"].has_key("source"): + session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes) + session.commit() + + ret = True + + session.close() + + return ret + + def queue_unembargo(u, summary, short_summary, session=None): + return package_to_queue(u, summary, short_summary, "Unembargoed", + perms=0660, build=True, announce='process-unchecked.accepted') + + ################################################################################ + + def is_embargo(u): + # if embargoed queues are enabled always embargo + return True + + def queue_embargo(u, summary, short_summary, session=None): + return package_to_queue(u, summary, short_summary, "Unembargoed", + perms=0660, build=True, 
announce='process-unchecked.accepted') + + ################################################################################ + + def is_stableupdate(u): + return package_to_suite(u, 'proposed-updates') + + def do_stableupdate(u, summary, short_summary, session=None): + return package_to_queue(u, summary, short_summary, "ProposedUpdates", + perms=0664, build=False, announce=None) + + ################################################################################ + + def is_oldstableupdate(u): + return package_to_suite(u, 'oldstable-proposed-updates') + + def do_oldstableupdate(u, summary, short_summary, session=None): + return package_to_queue(u, summary, short_summary, "OldProposedUpdates", + perms=0664, build=False, announce=None) + + ################################################################################ + + def is_autobyhand(u): + cnf = Config() + + all_auto = 1 + any_auto = 0 + for f in u.pkg.files.keys(): + if u.pkg.files[f].has_key("byhand"): + any_auto = 1 + + # filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH + # don't contain underscores, and ARCH doesn't contain dots. + # further VER matches the .changes Version:, and ARCH should be in + # the .changes Architecture: list. + if f.count("_") < 2: + all_auto = 0 + continue + + (pckg, ver, archext) = f.split("_", 2) + if archext.count(".") < 1 or u.pkg.changes["version"] != ver: + all_auto = 0 + continue + + ABH = cnf.SubTree("AutomaticByHandPackages") + if not ABH.has_key(pckg) or \ + ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]: + print "not match %s %s" % (pckg, u.pkg.changes["source"]) + all_auto = 0 + continue + + (arch, ext) = archext.split(".", 1) + if arch not in u.pkg.changes["architecture"]: + all_auto = 0 + continue + + u.pkg.files[f]["byhand-arch"] = arch + u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)] + + return any_auto and all_auto + + def do_autobyhand(u, summary, short_summary, session=None): + print "Attempting AUTOBYHAND." + byhandleft = True + for f, entry in u.pkg.files.items(): + byhandfile = f + + if not entry.has_key("byhand"): + continue + + if not entry.has_key("byhand-script"): + byhandleft = True + continue + + os.system("ls -l %s" % byhandfile) + + result = os.system("%s %s %s %s %s" % ( + entry["byhand-script"], + byhandfile, + u.pkg.changes["version"], + entry["byhand-arch"], + os.path.abspath(u.pkg.changes_file))) + + if result == 0: + os.unlink(byhandfile) + del entry + else: + print "Error processing %s, left as byhand." % (f) + byhandleft = True + + if byhandleft: + do_byhand(u, summary, short_summary, session) + else: + u.accept(summary, short_summary, session) + u.check_override() + + ################################################################################ + + def is_byhand(u): + for f in u.pkg.files.keys(): + if u.pkg.files[f].has_key("byhand"): + return True + return False + + def do_byhand(u, summary, short_summary, session=None): + return package_to_queue(u, summary, short_summary, "Byhand", + perms=0660, build=False, announce=None) + + ################################################################################ + + def is_new(u): + for f in u.pkg.files.keys(): + if u.pkg.files[f].has_key("new"): + return True + return False + + def acknowledge_new(u, summary, short_summary, session=None): + cnf = Config() + + print "Moving to NEW queue." + u.logger.log(["Moving to new", u.pkg.changes_file]) + + u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644) + + if not cnf["Dinstall::Options::No-Mail"]: + print "Sending new ack." 
+         template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
+         u.update_subst()
+         u.Subst["__SUMMARY__"] = summary
+         new_ack_message = utils.TemplateSubst(u.Subst, template)
+         utils.send_mail(new_ack_message)
+ 
+ ################################################################################
+ 
+ # q-unapproved hax0ring
+ QueueInfo = {
+     "New": { "is": is_new, "process": acknowledge_new },
+     "Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
+     "Byhand" : { "is": is_byhand, "process": do_byhand },
+     "OldStableUpdate" : { "is": is_oldstableupdate,
+                           "process": do_oldstableupdate },
+     "StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate },
+     "Unembargo" : { "is": is_unembargo, "process": queue_unembargo },
+     "Embargo" : { "is": is_embargo, "process": queue_embargo },
+ }
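
The QueueInfo table above pairs each queue name with an "is" predicate and a "process" handler; determine_target() walks an ordered list of queue names and returns the first entry whose predicate accepts the upload, after which the caller invokes the matching handler. The following is a minimal, self-contained sketch of that dispatch pattern, not dak's real code: FakeUpload, the two toy predicates and the hard-coded queue list are illustrative stand-ins for the real Upload object, queue checks and configuration.

# Minimal sketch of the predicate/handler dispatch used by determine_target().
# FakeUpload and the predicates below are illustrative stand-ins only.
class FakeUpload(object):
    def __init__(self, files):
        # files maps filename -> dict of flags, loosely like u.pkg.files
        self.files = files

def is_new(u):
    return any(entry.get("new") for entry in u.files.values())

def is_byhand(u):
    return any(entry.get("byhand") for entry in u.files.values())

def acknowledge_new(u):
    print "Would move %d file(s) to the NEW queue." % len(u.files)

def do_byhand(u):
    print "Would move %d file(s) to the BYHAND queue." % len(u.files)

# Same shape as QueueInfo: each queue name maps to an "is" predicate
# and a "process" handler.
queue_info = {
    "New":    {"is": is_new,    "process": acknowledge_new},
    "Byhand": {"is": is_byhand, "process": do_byhand},
}

def determine_target(u, queues=("New", "Byhand")):
    # Return the first queue whose predicate accepts the upload, or None.
    for q in queues:
        if queue_info[q]["is"](u):
            return q
    return None

if __name__ == '__main__':
    upload = FakeUpload({"foo_1.0_all.deb": {"new": True}})
    target = determine_target(upload)
    if target is not None:
        queue_info[target]["process"](upload)

Keeping the predicates separate from the handlers is what lets the caller vary the set of candidate queues by configuration alone, as determine_target() does when Dinstall::SecurityQueueHandling swaps the StableUpdate/OldStableUpdate pair for Embargo/Unembargo.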