"""
def __init__(self, configuration, featureSet=None):
- self.recipecache = None
+ self.recipecaches = None
self.skiplist = {}
self.featureset = CookerFeatures()
if featureSet:
nice = int(nice) - curnice
buildlog.verbose("Renice to %s " % os.nice(nice))
- if self.recipecache:
- del self.recipecache
- self.recipecache = bb.cache.CacheData(self.caches_array)
+ if self.recipecaches:
+ del self.recipecaches
+ self.multiconfigs = self.databuilder.mcdata.keys()
+ self.recipecaches = {}
+ for mc in self.multiconfigs:
+ self.recipecaches[mc] = bb.cache.CacheData(self.caches_array)
- self.handleCollections( self.data.getVar("BBFILE_COLLECTIONS", True) )
+ self.handleCollections(self.data.getVar("BBFILE_COLLECTIONS", True))
def updateConfigOpts(self, options, environment):
clean = True
def showVersions(self):
- pkg_pn = self.recipecache.pkg_pn
- (latest_versions, preferred_versions) = bb.providers.findProviders(self.data, self.recipecache, pkg_pn)
+ pkg_pn = self.recipecaches[''].pkg_pn
+ (latest_versions, preferred_versions) = bb.providers.findProviders(self.data, self.recipecaches[''], pkg_pn)
logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
# this showEnvironment() code path doesn't use the cache
self.parseConfiguration()
- fn, cls = bb.cache.virtualfn2realfn(buildfile)
+ fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
fn = self.matchFile(fn)
- fn = bb.cache.realfn2virtual(fn, cls)
+ fn = bb.cache.realfn2virtual(fn, cls, mc)
elif len(pkgs_to_build) == 1:
ignore = self.expanded_data.getVar("ASSUME_PROVIDED", True) or ""
if pkgs_to_build[0] in set(ignore.split()):
bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])
- taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
+ taskdata, runlist = self.buildTaskData(pkgs_to_build, None, self.configuration.abort, allowincomplete=True)
- fn = taskdata.build_targets[pkgs_to_build[0]][0]
+ mc = runlist[0][0]
+ fn = runlist[0][3]
else:
envdata = self.data
task = self.configuration.cmd
fulltargetlist = self.checkPackages(pkgs_to_build)
+ taskdata = {}
+ localdata = {}
- localdata = data.createCopy(self.data)
- bb.data.update_data(localdata)
- bb.data.expandKeys(localdata)
- taskdata = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+ for mc in self.multiconfigs:
+ taskdata[mc] = bb.taskdata.TaskData(abort, skiplist=self.skiplist, allowincomplete=allowincomplete)
+ localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
+ bb.data.update_data(localdata[mc])
+ bb.data.expandKeys(localdata[mc])
current = 0
runlist = []
for k in fulltargetlist:
+ mc = ""
+ if k.startswith("multiconfig:"):
+ mc = k.split(":")[1]
+ k = ":".join(k.split(":")[2:])
ktask = task
if ":do_" in k:
k2 = k.split(":do_")
k = k2[0]
ktask = k2[1]
- taskdata.add_provider(localdata, self.recipecache, k)
+ taskdata[mc].add_provider(localdata[mc], self.recipecaches[mc], k)
current += 1
if not ktask.startswith("do_"):
ktask = "do_%s" % ktask
- runlist.append([k, ktask])
+ if k not in taskdata[mc].build_targets or not taskdata[mc].build_targets[k]:
+ # e.g. in ASSUME_PROVIDED
+ continue
+ fn = taskdata[mc].build_targets[k][0]
+ runlist.append([mc, k, ktask, fn])
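+ # each runlist entry takes the form [mc, target, taskname, fn]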
bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data)
- taskdata.add_unresolved(localdata, self.recipecache)
+
+ for mc in self.multiconfigs:
+ taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
+
bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
- return taskdata, runlist, fulltargetlist
+ return taskdata, runlist
def prepareTreeData(self, pkgs_to_build, task):
"""
# We set abort to False here to prevent unbuildable targets raising
# an exception when we're just generating data
- taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
+ taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)
return runlist, taskdata
information.
"""
runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
- rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
rq.rqdata.prepare()
return self.buildDependTree(rq, taskdata)
+ @staticmethod
+ def add_mc_prefix(mc, pn):
+ if mc:
+ return "multiconfig:%s.%s" % (mc, pn)
+ return pn
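+ # e.g. add_mc_prefix("x86", "zlib") gives "multiconfig:x86.zlib"
+ # (illustrative names); with no mc the PN is returned unchanged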
def buildDependTree(self, rq, taskdata):
seen_fns = []
depend_tree["rdepends-pkg"] = {}
depend_tree["rrecs-pkg"] = {}
depend_tree['providermap'] = {}
- depend_tree["layer-priorities"] = self.recipecache.bbfile_config_priorities
+ depend_tree["layer-priorities"] = self.bbfile_config_priorities
- for name, fn in list(taskdata.get_providermap().items()):
- pn = self.recipecache.pkg_fn[fn]
- if name != pn:
- version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
- depend_tree['providermap'][name] = (pn, version)
+ for mc in taskdata:
+ for name, fn in list(taskdata[mc].get_providermap().items()):
+ pn = self.recipecaches[mc].pkg_fn[fn]
+ pn = self.add_mc_prefix(mc, pn)
+ if name != pn:
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[fn]
+ depend_tree['providermap'][name] = (pn, version)
for tid in rq.rqdata.runtaskentries:
- taskname = bb.runqueue.taskname_from_tid(tid)
- fn = bb.runqueue.fn_from_tid(tid)
- pn = self.recipecache.pkg_fn[fn]
- version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
+ (mc, fn, taskname) = bb.runqueue.split_tid(tid)
+ taskfn = bb.runqueue.taskfn_fromtid(tid)
+ pn = self.recipecaches[mc].pkg_fn[taskfn]
+ pn = self.add_mc_prefix(mc, pn)
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
if pn not in depend_tree["pn"]:
depend_tree["pn"][pn] = {}
- depend_tree["pn"][pn]["filename"] = fn
+ depend_tree["pn"][pn]["filename"] = taskfn
depend_tree["pn"][pn]["version"] = version
- depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None)
+ depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
# if we have extra caches, list all attributes they bring in
extra_info = []
# for all attributes stored, add them to the dependency tree
for ei in extra_info:
- depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn]
+ depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
for dep in rq.rqdata.runtaskentries[tid].depends:
- depfn = bb.runqueue.fn_from_tid(dep)
- deppn = self.recipecache.pkg_fn[depfn]
+ (depmc, depfn, deptaskname) = bb.runqueue.split_tid(dep)
+ deptaskfn = bb.runqueue.taskfn_fromtid(dep)
+ deppn = self.recipecaches[mc].pkg_fn[deptaskfn]
dotname = "%s.%s" % (pn, bb.runqueue.taskname_from_tid(tid))
if not dotname in depend_tree["tdepends"]:
depend_tree["tdepends"][dotname] = []
depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
- if fn not in seen_fns:
- seen_fns.append(fn)
+ if taskfn not in seen_fns:
+ seen_fns.append(taskfn)
packages = []
depend_tree["depends"][pn] = []
- for dep in taskdata.depids[fn]:
+ for dep in taskdata[mc].depids[taskfn]:
depend_tree["depends"][pn].append(dep)
depend_tree["rdepends-pn"][pn] = []
- for rdep in taskdata.rdepids[fn]:
+ for rdep in taskdata[mc].rdepids[taskfn]:
depend_tree["rdepends-pn"][pn].append(rdep)
- rdepends = self.recipecache.rundeps[fn]
+ rdepends = self.recipecaches[mc].rundeps[taskfn]
for package in rdepends:
depend_tree["rdepends-pkg"][package] = []
for rdepend in rdepends[package]:
depend_tree["rdepends-pkg"][package].append(rdepend)
packages.append(package)
- rrecs = self.recipecache.runrecs[fn]
+ rrecs = self.recipecaches[mc].runrecs[taskfn]
for package in rrecs:
depend_tree["rrecs-pkg"][package] = []
for rdepend in rrecs[package]:
if package not in depend_tree["packages"]:
depend_tree["packages"][package] = {}
depend_tree["packages"][package]["pn"] = pn
- depend_tree["packages"][package]["filename"] = fn
+ depend_tree["packages"][package]["filename"] = taskfn
depend_tree["packages"][package]["version"] = version
return depend_tree
cachefields = getattr(cache_class, 'cachefields', [])
extra_info = extra_info + cachefields
- for tid in taskdata.taskentries:
- fn = bb.runqueue.fn_from_tid(tid)
- pn = self.recipecache.pkg_fn[fn]
+ tids = []
+ for mc in taskdata:
+ for tid in taskdata[mc].taskentries:
+ tids.append(tid)
+
+ for tid in tids:
+ (mc, fn, taskname) = bb.runqueue.split_tid(tid)
+ taskfn = bb.runqueue.taskfn_fromtid(tid)
+
+ pn = self.recipecaches[mc].pkg_fn[taskfn]
+ pn = self.add_mc_prefix(mc, pn)
if pn not in depend_tree["pn"]:
depend_tree["pn"][pn] = {}
- depend_tree["pn"][pn]["filename"] = fn
- version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn]
+ depend_tree["pn"][pn]["filename"] = taskfn
+ version = "%s:%s-%s" % self.recipecaches[mc].pkg_pepvpr[taskfn]
depend_tree["pn"][pn]["version"] = version
- rdepends = self.recipecache.rundeps[fn]
- rrecs = self.recipecache.runrecs[fn]
- depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None)
+ rdepends = self.recipecaches[mc].rundeps[taskfn]
+ rrecs = self.recipecaches[mc].runrecs[taskfn]
+ depend_tree["pn"][pn]["inherits"] = self.recipecaches[mc].inherits.get(taskfn, None)
# for all extra attributes stored, add them to the dependency tree
for ei in extra_info:
- depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn]
+ depend_tree["pn"][pn][ei] = vars(self.recipecaches[mc])[ei][taskfn]
- if fn not in seen_fns:
- seen_fns.append(fn)
+ if taskfn not in seen_fns:
+ seen_fns.append(taskfn)
depend_tree["depends"][pn] = []
- for item in taskdata.depids[fn]:
+ for item in taskdata[mc].depids[taskfn]:
pn_provider = ""
- if dep in taskdata.build_targets and taskdata.build_targets[dep]:
- fn_provider = taskdata.build_targets[dep][0]
- pn_provider = self.recipecache.pkg_fn[fn_provider]
+ if item in taskdata[mc].build_targets and taskdata[mc].build_targets[item]:
+ fn_provider = taskdata[mc].build_targets[item][0]
+ pn_provider = self.recipecaches[mc].pkg_fn[fn_provider]
else:
pn_provider = item
+ pn_provider = self.add_mc_prefix(mc, pn_provider)
depend_tree["depends"][pn].append(pn_provider)
depend_tree["rdepends-pn"][pn] = []
- for rdep in taskdata.rdepids[fn]:
+ for rdep in taskdata[mc].rdepids[taskfn]:
pn_rprovider = ""
- if rdep in taskdata.run_targets and taskdata.run_targets[rdep]:
- fn_rprovider = taskdata.run_targets[rdep][0]
- pn_rprovider = self.recipecache.pkg_fn[fn_rprovider]
+ if rdep in taskdata[mc].run_targets and taskdata[mc].run_targets[rdep]:
+ fn_rprovider = taskdata[mc].run_targets[rdep][0]
+ pn_rprovider = self.recipecaches[mc].pkg_fn[fn_rprovider]
else:
pn_rprovider = rdep
+ pn_rprovider = self.add_mc_prefix(mc, pn_rprovider)
depend_tree["rdepends-pn"][pn].append(pn_rprovider)
depend_tree["rdepends-pkg"].update(rdepends)
# Determine which bbappends haven't been applied
# First get list of recipes, including skipped
- recipefns = list(self.recipecache.pkg_fn.keys())
+ recipefns = list(self.recipecaches[''].pkg_fn.keys())
recipefns.extend(self.skiplist.keys())
# Work out list of bbappends that have been applied
def handlePrefProviders(self):
- localdata = data.createCopy(self.data)
- bb.data.update_data(localdata)
- bb.data.expandKeys(localdata)
+ for mc in self.multiconfigs:
+ localdata = data.createCopy(self.databuilder.mcdata[mc])
+ bb.data.update_data(localdata)
+ bb.data.expandKeys(localdata)
- # Handle PREFERRED_PROVIDERS
- for p in (localdata.getVar('PREFERRED_PROVIDERS', True) or "").split():
- try:
- (providee, provider) = p.split(':')
- except:
- providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
- continue
- if providee in self.recipecache.preferred and self.recipecache.preferred[providee] != provider:
- providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecache.preferred[providee])
- self.recipecache.preferred[providee] = provider
+ # Handle PREFERRED_PROVIDERS
+ for p in (localdata.getVar('PREFERRED_PROVIDERS', True) or "").split():
+ try:
+ (providee, provider) = p.split(':')
+ except:
+ providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
+ continue
+ if providee in self.recipecaches[mc].preferred and self.recipecaches[mc].preferred[providee] != provider:
+ providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecaches[mc].preferred[providee])
+ self.recipecaches[mc].preferred[providee] = provider
def findCoreBaseFiles(self, subdir, configfile):
corebase = self.data.getVar('COREBASE', True) or ""
"""
pkg_list = []
- for pfn in self.recipecache.pkg_fn:
- inherits = self.recipecache.inherits.get(pfn, None)
+ for pfn in self.recipecaches[''].pkg_fn:
+ inherits = self.recipecaches[''].inherits.get(pfn, None)
if inherits and klass in inherits:
- pkg_list.append(self.recipecache.pkg_fn[pfn])
+ pkg_list.append(self.recipecaches[''].pkg_fn[pfn])
return pkg_list
shell.start( self )
- def handleCollections( self, collections ):
+ def handleCollections(self, collections):
"""Handle collections"""
errors = False
- self.recipecache.bbfile_config_priorities = []
+ self.bbfile_config_priorities = []
if collections:
collection_priorities = {}
collection_depends = {}
parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex)
errors = True
continue
- self.recipecache.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c]))
+ self.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c]))
if errors:
# We've already printed the actual error(s)
raise CollectionError("Errors during parsing layer configuration")
if bf.startswith("/") or bf.startswith("../"):
bf = os.path.abspath(bf)
- self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities)
+ self.collection = CookerCollectFiles(self.bbfile_config_priorities)
filelist, masked = self.collection.collect_bbfiles(self.data, self.expanded_data)
try:
os.stat(bf)
if (task == None):
task = self.configuration.cmd
- fn, cls = bb.cache.virtualfn2realfn(buildfile)
+ fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
fn = self.matchFile(fn)
self.buildSetVars()
infos = bb_cache.parse(fn, self.collection.get_file_appends(fn))
infos = dict(infos)
- fn = bb.cache.realfn2virtual(fn, cls)
+ fn = bb.cache.realfn2virtual(fn, cls, mc)
try:
info_array = infos[fn]
except KeyError:
if info_array[0].skipped:
bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason))
- self.recipecache.add_from_recipeinfo(fn, info_array)
+ self.recipecaches[mc].add_from_recipeinfo(fn, info_array)
# Tweak some variables
item = info_array[0].pn
- self.recipecache.ignored_dependencies = set()
- self.recipecache.bbfile_priority[fn] = 1
+ self.recipecaches[mc].ignored_dependencies = set()
+ self.recipecaches[mc].bbfile_priority[fn] = 1
# Remove external dependencies
- self.recipecache.task_deps[fn]['depends'] = {}
- self.recipecache.deps[fn] = []
- self.recipecache.rundeps[fn] = []
- self.recipecache.runrecs[fn] = []
+ self.recipecaches[mc].task_deps[fn]['depends'] = {}
+ self.recipecaches[mc].deps[fn] = []
+ self.recipecaches[mc].rundeps[fn] = []
+ self.recipecaches[mc].runrecs[fn] = []
# Invalidate task for target if force mode active
if self.configuration.force:
logger.verbose("Invalidate task %s, %s", task, fn)
if not task.startswith("do_"):
task = "do_%s" % task
- bb.parse.siggen.invalidate_task(task, self.recipecache, fn)
+ bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
# Setup taskdata structure
- taskdata = bb.taskdata.TaskData(self.configuration.abort)
- taskdata.add_provider(self.data, self.recipecache, item)
+ taskdata = {}
+ taskdata[mc] = bb.taskdata.TaskData(self.configuration.abort)
+ taskdata[mc].add_provider(self.data, self.recipecaches[mc], item)
buildname = self.data.getVar("BUILDNAME", True)
bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.expanded_data)
# Execute the runqueue
if not task.startswith("do_"):
task = "do_%s" % task
- runlist = [[item, task]]
+ runlist = [[mc, item, task, fn]]
- rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
def buildFileIdle(server, rq, abort):
packages = ["%s:%s" % (target, task) for target in targets]
bb.event.fire(bb.event.BuildInit(packages), self.expanded_data)
- taskdata, runlist, fulltargetlist = self.buildTaskData(targets, task, self.configuration.abort)
+ taskdata, runlist = self.buildTaskData(targets, task, self.configuration.abort)
buildname = self.data.getVar("BUILDNAME", False)
# make targets always look like <target>:do_<task>
ntargets = []
- for target in fulltargetlist:
- if ":" in target:
- if ":do_" not in target:
- target = "%s:do_%s" % tuple(target.split(":", 1))
- else:
- target = "%s:%s" % (target, task)
- ntargets.append(target)
+ for target in runlist:
+ if target[0]:
+ ntargets.append("multiconfig:%s:%s:%s" % (target[0], target[1], target[2]))
+ ntargets.append("%s:%s" % (target[1], target[2]))
bb.event.fire(bb.event.BuildStarted(buildname, ntargets), self.data)
- rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
+ rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
if 'universe' in targets:
rq.rqdata.warn_multi_bb = True
if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
bb.event.fire(bb.event.SanityCheck(False), self.data)
- ignore = self.expanded_data.getVar("ASSUME_PROVIDED", True) or ""
- self.recipecache.ignored_dependencies = set(ignore.split())
+ for mc in self.multiconfigs:
+ ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED", True) or ""
+ self.recipecaches[mc].ignored_dependencies = set(ignore.split())
- for dep in self.configuration.extra_assume_provided:
- self.recipecache.ignored_dependencies.add(dep)
+ for dep in self.configuration.extra_assume_provided:
+ self.recipecaches[mc].ignored_dependencies.add(dep)
- self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities)
+ self.collection = CookerCollectFiles(self.bbfile_config_priorities)
(filelist, masked) = self.collection.collect_bbfiles(self.data, self.expanded_data)
self.parser = CookerParser(self, filelist, masked)
raise bb.BBHandledException()
self.show_appends_with_no_recipes()
self.handlePrefProviders()
- self.recipecache.bbfile_priority = self.collection.collection_priorities(self.recipecache.pkg_fn, self.data)
+ for mc in self.multiconfigs:
+ self.recipecaches[mc].bbfile_priority = self.collection.collection_priorities(self.recipecaches[mc].pkg_fn, self.data)
self.state = state.running
# Send an event listing all stamps reachable after parsing
# which the metadata may use to clean up stale data
- event = bb.event.ReachableStamps(self.recipecache.stamp)
- bb.event.fire(event, self.expanded_data)
+ for mc in self.multiconfigs:
+ event = bb.event.ReachableStamps(self.recipecaches[mc].stamp)
+ bb.event.fire(event, self.databuilder.mcdata[mc])
return None
return True
parselog.warning("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg)
if 'world' in pkgs_to_build:
- bb.providers.buildWorldTargetList(self.recipecache)
pkgs_to_build.remove('world')
- for t in self.recipecache.world_target:
- pkgs_to_build.append(t)
+ for mc in self.multiconfigs:
+ bb.providers.buildWorldTargetList(self.recipecaches[mc])
+ for t in self.recipecaches[mc].world_target:
+ if mc:
+ t = "multiconfig:" + mc + ":" + t
+ pkgs_to_build.append(t)
if 'universe' in pkgs_to_build:
parselog.warning("The \"universe\" target is only intended for testing and may produce errors.")
parselog.debug(1, "collating packages for \"universe\"")
pkgs_to_build.remove('universe')
- for t in self.recipecache.universe_target:
- pkgs_to_build.append(t)
+ for mc in self.multiconfigs:
+ for t in self.recipecaches[mc].universe_target:
+ if mc:
+ t = "multiconfig:" + mc + ":" + t
+ pkgs_to_build.append(t)
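+ # e.g. an illustrative world/universe target "zlib" from mc "x86" is
+ # returned as "multiconfig:x86:zlib"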
return pkgs_to_build
-
-
-
def pre_serve(self):
# Empty the environment. The environment will be populated as
# necessary from the data store.
# Calculate priorities for each file
matched = set()
for p in pkgfns:
- realfn, cls = bb.cache.virtualfn2realfn(p)
+ realfn, cls, mc = bb.cache.virtualfn2realfn(p)
priorities[p] = self.calc_bbfile_priority(realfn, matched)
# Don't show the warning if the BBFILE_PATTERN did match .bbappend files
if info_array[0].skipped:
self.skipped += 1
self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
- self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecache,
+ (fn, cls, mc) = bb.cache.virtualfn2realfn(virtualfn)
+ self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
parsed=parsed, watcher = self.cooker.add_filewatch)
return True
def reparse(self, filename):
infos = self.bb_cache.parse(filename, self.cooker.collection.get_file_appends(filename))
for vfn, info_array in infos:
- self.cooker.recipecache.add_from_recipeinfo(vfn, info_array)
+ (fn, cls, mc) = bb.cache.virtualfn2realfn(vfn)
+ self.cooker.recipecaches[mc].add_from_recipeinfo(vfn, info_array)
def taskname_from_tid(tid):
return tid.rsplit(":", 1)[1]
+def split_tid(tid):
+ if tid.startswith('multiconfig:'):
+ elems = tid.split(':')
+ mc = elems[1]
+ fn = ":".join(elems[2:-1])
+ taskname = elems[-1]
+ else:
+ tid = tid.rsplit(":", 1)
+ mc = ""
+ fn = tid[0]
+ taskname = tid[1]
+
+ return (mc, fn, taskname)
+
+def build_tid(mc, fn, taskname):
+ if mc:
+ return "multiconfig:" + mc + ":" + fn + ":" + taskname
+ return fn + ":" + taskname
+
+def taskfn_fromtid(tid):
+ (mc, fn, taskname) = split_tid(tid)
+ if mc:
+ return "multiconfig:" + mc + ":" + fn
+ return fn
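+# Illustrative examples (hypothetical recipe path):
+#   split_tid("multiconfig:x86:/r/zlib.bb:do_compile") -> ("x86", "/r/zlib.bb", "do_compile")
+#   split_tid("/r/zlib.bb:do_compile") -> ("", "/r/zlib.bb", "do_compile")
+#   build_tid() reverses split_tid(), while taskfn_fromtid() keeps the
+#   "multiconfig:<mc>:" prefix on the returned filename.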
+
class RunQueueStats:
"""
Holds statistics on the tasks handled by the associated runQueue
self.buildable = []
self.stamps = {}
for tid in self.rqdata.runtaskentries:
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
- self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
+ (mc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
if tid in self.rq.runq_buildable:
self.buildable.append(tid)
"""
BitBake Run Queue implementation
"""
- def __init__(self, rq, cooker, cfgData, dataCache, taskData, targets):
+ def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
self.cooker = cooker
- self.dataCache = dataCache
+ self.dataCaches = dataCaches
self.taskData = taskData
self.targets = targets
self.rq = rq
return tid + task_name_suffix
def get_short_user_idstring(self, task, task_name_suffix = ""):
- fn = fn_from_tid(task)
- pn = self.dataCache.pkg_fn[fn]
+ (mc, fn, taskname) = split_tid(task)
+ pn = self.dataCaches[mc].pkg_fn[fn]
taskname = taskname_from_tid(task) + task_name_suffix
return "%s:%s" % (pn, taskname)
taskData = self.taskData
- if len(taskData.taskentries) == 0:
+ found = False
+ for mc in self.taskData:
+ if len(taskData[mc].taskentries) > 0:
+ found = True
+ break
+ if not found:
# Nothing to do
return 0
# process is repeated for each type of dependency (tdepends, deptask,
# rdeptask, recrdeptask, idepends).
- def add_build_dependencies(depids, tasknames, depends):
+ def add_build_dependencies(depids, tasknames, depends, mc):
for depname in depids:
# Won't be in build_targets if ASSUME_PROVIDED
- if depname not in taskData.build_targets or not taskData.build_targets[depname]:
+ if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
continue
- depdata = taskData.build_targets[depname][0]
+ depdata = taskData[mc].build_targets[depname][0]
if depdata is None:
continue
for taskname in tasknames:
t = depdata + ":" + taskname
- if t in taskData.taskentries:
+ if t in taskData[mc].taskentries:
depends.add(t)
- def add_runtime_dependencies(depids, tasknames, depends):
+ def add_runtime_dependencies(depids, tasknames, depends, mc):
for depname in depids:
- if depname not in taskData.run_targets or not taskData.run_targets[depname]:
+ if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
continue
- depdata = taskData.run_targets[depname][0]
+ depdata = taskData[mc].run_targets[depname][0]
if depdata is None:
continue
for taskname in tasknames:
t = depdata + ":" + taskname
- if t in taskData.taskentries:
+ if t in taskData[mc].taskentries:
depends.add(t)
- def add_resolved_dependencies(fn, tasknames, depends):
+ def add_resolved_dependencies(mc, fn, tasknames, depends):
for taskname in tasknames:
- tid = fn + ":" + taskname
+ tid = build_tid(mc, fn, taskname)
if tid in self.runtaskentries:
depends.add(tid)
- for tid in taskData.taskentries:
+ for mc in taskData:
+ for tid in taskData[mc].taskentries:
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
+ (mc, fn, taskname) = split_tid(tid)
+ #runtid = build_tid(mc, fn, taskname)
+ taskfn = taskfn_fromtid(tid)
- depends = set()
- task_deps = self.dataCache.task_deps[fn]
+ #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
- self.runtaskentries[tid] = RunTaskEntry()
+ depends = set()
+ task_deps = self.dataCaches[mc].task_deps[taskfn]
- #logger.debug(2, "Processing %s:%s", fn, taskname)
+ self.runtaskentries[tid] = RunTaskEntry()
- if fn not in taskData.failed_fns:
+ if fn in taskData[mc].failed_fns:
+ continue
# Resolve task internal dependencies
#
# e.g. addtask before X after Y
- depends.update(taskData.taskentries[tid].tdepends)
+ for t in taskData[mc].taskentries[tid].tdepends:
+ (_, depfn, deptaskname) = split_tid(t)
+ depends.add(build_tid(mc, depfn, deptaskname))
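+ # the dep tid is rebuilt with this task's mc so that intra-recipe
+ # (addtask) dependencies stay within the same configuration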
# Resolve 'deptask' dependencies
#
# (makes sure sometask runs after someothertask of all DEPENDS)
if 'deptask' in task_deps and taskname in task_deps['deptask']:
tasknames = task_deps['deptask'][taskname].split()
- add_build_dependencies(taskData.depids[fn], tasknames, depends)
+ add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
# Resolve 'rdeptask' dependencies
#
# (makes sure sometask runs after someothertask of all RDEPENDS)
if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
tasknames = task_deps['rdeptask'][taskname].split()
- add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends)
+ add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
# Resolve inter-task dependencies
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
- idepends = taskData.taskentries[tid].idepends
+ idepends = taskData[mc].taskentries[tid].idepends
for (depname, idependtask) in idepends:
- if depname in taskData.build_targets and taskData.build_targets[depname] and not depname in taskData.failed_deps:
+ if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
# Won't be in build_targets if ASSUME_PROVIDED
- depdata = taskData.build_targets[depname][0]
+ depdata = taskData[mc].build_targets[depname][0]
if depdata is not None:
t = depdata + ":" + idependtask
depends.add(t)
- if t not in taskData.taskentries:
+ if t not in taskData[mc].taskentries:
bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
- irdepends = taskData.taskentries[tid].irdepends
+ irdepends = taskData[mc].taskentries[tid].irdepends
for (depname, idependtask) in irdepends:
- if depname in taskData.run_targets:
+ if depname in taskData[mc].run_targets:
# Won't be in run_targets if ASSUME_PROVIDED
- depdata = taskData.run_targets[depname][0]
+ depdata = taskData[mc].run_targets[depname][0]
if depdata is not None:
t = depdata + ":" + idependtask
depends.add(t)
- if t not in taskData.taskentries:
+ if t not in taskData[mc].taskentries:
bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
# Resolve recursive 'recrdeptask' dependencies (Part A)
if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
tasknames = task_deps['recrdeptask'][taskname].split()
recursivetasks[tid] = tasknames
- add_build_dependencies(taskData.depids[fn], tasknames, depends)
- add_runtime_dependencies(taskData.rdepids[fn], tasknames, depends)
+ add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
+ add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
if taskname in tasknames:
recursivetasksselfref.add(tid)
if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
recursiveitasks[tid] = []
for t in task_deps['recideptask'][taskname].split():
- newdep = fn + ":" + t
+ newdep = build_tid(mc, fn, t)
recursiveitasks[tid].append(newdep)
- self.runtaskentries[tid].depends = depends
+ self.runtaskentries[tid].depends = depends
+
+ #self.dump_data()
# Resolve recursive 'recrdeptask' dependencies (Part B)
#
def generate_recdeps(t):
newdeps = set()
- add_resolved_dependencies(fn_from_tid(t), tasknames, newdeps)
+ (mc, fn, taskname) = split_tid(t)
+ add_resolved_dependencies(mc, fn, tasknames, newdeps)
extradeps[tid].update(newdeps)
seendeps.add(t)
newdeps.add(t)
self.init_progress_reporter.next_stage()
+ #self.dump_data()
+
# Step B - Mark all active tasks
#
# Start with the tasks we were asked to run and mark all dependencies
for depend in depends:
mark_active(depend, depth+1)
- self.target_pairs = []
- for target in self.targets:
- if target[0] not in taskData.build_targets or not taskData.build_targets[target[0]]:
+ self.target_tids = []
+ for (mc, target, task, fn) in self.targets:
+
+ if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
continue
- if target[0] in taskData.failed_deps:
+ if target in taskData[mc].failed_deps:
continue
- fn = taskData.build_targets[target[0]][0]
- task = target[1]
parents = False
if task.endswith('-'):
parents = True
task = task[:-1]
- self.target_pairs.append((fn, task))
-
- if fn in taskData.failed_fns:
+ if fn in taskData[mc].failed_fns:
continue
+ # fn already has mc prefix
tid = fn + ":" + task
- if tid not in taskData.taskentries:
+ self.target_tids.append(tid)
+ if tid not in taskData[mc].taskentries:
import difflib
tasks = []
- for x in taskData.taskentries:
+ for x in taskData[mc].taskentries:
if x.startswith(fn + ":"):
tasks.append(taskname_from_tid(x))
close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
extra = ". Close matches:\n %s" % "\n ".join(close_matches)
else:
extra = ""
- bb.msg.fatal("RunQueue", "Task %s does not exist for target %s%s" % (task, target[0], extra))
+ bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
# For tasks called "XXXX-", ony run their dependencies
if parents:
# Check to make sure we still have tasks to run
if len(self.runtaskentries) == 0:
- if not taskData.abort:
+ if not taskData[''].abort:
bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
else:
bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
endpoints.append(tid)
for dep in revdeps:
if dep in self.runtaskentries[tid].depends:
- #self.dump_data(taskData)
bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
self.init_progress_reporter.next_stage()
# Sanity Check - Check for multiple tasks building the same provider
- prov_list = {}
- seen_fn = []
- for tid in self.runtaskentries:
- fn = fn_from_tid(tid)
- if fn in seen_fn:
- continue
- seen_fn.append(fn)
- for prov in self.dataCache.fn_provides[fn]:
- if prov not in prov_list:
- prov_list[prov] = [fn]
- elif fn not in prov_list[prov]:
- prov_list[prov].append(fn)
- for prov in prov_list:
- if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist:
+ for mc in self.dataCaches:
+ prov_list = {}
+ seen_fn = []
+ for tid in self.runtaskentries:
+ (tidmc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ if taskfn in seen_fn:
+ continue
+ if mc != tidmc:
+ continue
+ seen_fn.append(taskfn)
+ for prov in self.dataCaches[mc].fn_provides[taskfn]:
+ if prov not in prov_list:
+ prov_list[prov] = [taskfn]
+ elif taskfn not in prov_list[prov]:
+ prov_list[prov].append(taskfn)
+ for prov in prov_list:
+ if len(prov_list[prov]) < 2:
+ continue
+ if prov in self.multi_provider_whitelist:
+ continue
seen_pn = []
# If two versions of the same PN are being built it's fatal, we don't support it.
for fn in prov_list[prov]:
- pn = self.dataCache.pkg_fn[fn]
+ pn = self.dataCaches[mc].pkg_fn[fn]
if pn not in seen_pn:
seen_pn.append(pn)
else:
commonprovs = None
commonrprovs = None
for provfn in prov_list[prov]:
- provides = set(self.dataCache.fn_provides[provfn])
+ provides = set(self.dataCaches[mc].fn_provides[provfn])
rprovides = set()
- for rprovide in self.dataCache.rproviders:
- if provfn in self.dataCache.rproviders[rprovide]:
+ for rprovide in self.dataCaches[mc].rproviders:
+ if provfn in self.dataCaches[mc].rproviders[rprovide]:
rprovides.add(rprovide)
- for package in self.dataCache.packages:
- if provfn in self.dataCache.packages[package]:
+ for package in self.dataCaches[mc].packages:
+ if provfn in self.dataCaches[mc].packages[package]:
rprovides.add(package)
- for package in self.dataCache.packages_dynamic:
- if provfn in self.dataCache.packages_dynamic[package]:
+ for package in self.dataCaches[mc].packages_dynamic:
+ if provfn in self.dataCaches[mc].packages_dynamic[package]:
rprovides.add(package)
if not commonprovs:
commonprovs = set(provides)
self.init_progress_reporter.next_stage()
# Create a whitelist usable by the stamp checks
- stampfnwhitelist = []
- for entry in self.stampwhitelist.split():
- if entry not in self.taskData.build_targets:
- continue
- fn = self.taskData.build_targets[entry][0]
- stampfnwhitelist.append(fn)
- self.stampfnwhitelist = stampfnwhitelist
+ self.stampfnwhitelist = {}
+ for mc in self.taskData:
+ self.stampfnwhitelist[mc] = []
+ for entry in self.stampwhitelist.split():
+ if entry not in self.taskData[mc].build_targets:
+ continue
+ fn = self.taskData[mc].build_targets[entry][0]
+ self.stampfnwhitelist[mc].append(fn)
self.init_progress_reporter.next_stage()
self.runq_setscene_tids = []
if not self.cooker.configuration.nosetscene:
for tid in self.runtaskentries:
- setscenetid = tid + "_setscene"
- if setscenetid not in taskData.taskentries:
+ (mc, fn, taskname) = split_tid(tid)
+ setscenetid = fn + ":" + taskname + "_setscene"
+ if setscenetid not in taskData[mc].taskentries:
continue
- task = self.runtaskentries[tid].task
self.runq_setscene_tids.append(tid)
- def invalidate_task(fn, taskname, error_nostamp):
- taskdep = self.dataCache.task_deps[fn]
- tid = fn + ":" + taskname
- if tid not in taskData.taskentries:
+ def invalidate_task(tid, error_nostamp):
+ (mc, fn, taskname) = split_tid(tid)
+ taskdep = self.dataCaches[mc].task_deps[fn]
+ if fn + ":" + taskname not in taskData[mc].taskentries:
logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
if error_nostamp:
bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
else:
logger.verbose("Invalidate task %s, %s", taskname, fn)
- bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)
+ bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
self.init_progress_reporter.next_stage()
# Invalidate task if force mode active
if self.cooker.configuration.force:
- for (fn, target) in self.target_pairs:
- invalidate_task(fn, target, False)
+ for tid in self.target_tids:
+ invalidate_task(tid, False)
# Invalidate task if invalidate mode active
if self.cooker.configuration.invalidate_stamp:
- for (fn, target) in self.target_pairs:
+ for tid in self.target_tids:
+ fn = fn_from_tid(tid)
for st in self.cooker.configuration.invalidate_stamp.split(','):
if not st.startswith("do_"):
st = "do_%s" % st
- invalidate_task(fn, st, True)
+ invalidate_task(fn + ":" + st, True)
self.init_progress_reporter.next_stage()
# Create and print to the logs a virtual/xxxx -> PN (fn) table
- virtmap = taskData.get_providermap(prefix="virtual/")
- virtpnmap = {}
- for v in virtmap:
- virtpnmap[v] = self.dataCache.pkg_fn[virtmap[v]]
- bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
- if hasattr(bb.parse.siggen, "tasks_resolved"):
- bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCache)
+ for mc in taskData:
+ virtmap = taskData[mc].get_providermap(prefix="virtual/")
+ virtpnmap = {}
+ for v in virtmap:
+ virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
+ bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
+ if hasattr(bb.parse.siggen, "tasks_resolved"):
+ bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
self.init_progress_reporter.next_stage()
procdep = []
for dep in self.runtaskentries[tid].depends:
procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
- self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(fn_from_tid(tid), taskname_from_tid(tid), procdep, self.dataCache)
+ (mc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
task = self.runtaskentries[tid].task
bb.parse.siggen.writeout_file_checksum_cache()
+
+ #self.dump_data()
return len(self.runtaskentries)
- def dump_data(self, taskQueue):
+ def dump_data(self):
"""
Dump some debug information on the internal data structures
"""
self.runtaskentries[tid].depends,
self.runtaskentries[tid].revdeps)
- logger.debug(3, "sorted_tasks:")
- for tid in self.prio_map:
- logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
- self.runtaskentries[tid].weight,
- self.runtaskentries[tid].depends,
- self.runtaskentries[tid].revdeps)
-
class RunQueueWorker():
def __init__(self, process, pipe):
self.process = process
self.pipe = pipe
class RunQueue:
- def __init__(self, cooker, cfgData, dataCache, taskData, targets):
+ def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
self.cooker = cooker
self.cfgData = cfgData
- self.rqdata = RunQueueData(self, cooker, cfgData, dataCache, taskData, targets)
+ self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
self.worker = {}
self.fakeworker = {}
- def _start_worker(self, fakeroot = False, rqexec = None):
+ def _start_worker(self, mc, fakeroot = False, rqexec = None):
logger.debug(1, "Starting bitbake-worker")
magic = "decafbad"
if self.cooker.configuration.profile:
runqhash[tid] = self.rqdata.runtaskentries[tid].hash
workerdata = {
- "taskdeps" : self.rqdata.dataCache.task_deps,
- "fakerootenv" : self.rqdata.dataCache.fakerootenv,
- "fakerootdirs" : self.rqdata.dataCache.fakerootdirs,
- "fakerootnoenv" : self.rqdata.dataCache.fakerootnoenv,
+ "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
+ "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
+ "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
+ "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
"sigdata" : bb.parse.siggen.get_taskdata(),
"runq_hash" : runqhash,
"logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
if self.worker:
self.teardown_workers()
self.teardown = False
- self.worker[''] = self._start_worker()
+ for mc in self.rqdata.dataCaches:
+ self.worker[mc] = self._start_worker(mc)
def start_fakeworker(self, rqexec):
if not self.fakeworker:
- self.fakeworker[''] = self._start_worker(True, rqexec)
+ for mc in self.rqdata.dataCaches:
+ self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
def teardown_workers(self):
self.teardown = True
except:
return None
+ (mc, fn, tn) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ if taskname is None:
+ taskname = tn
+
if self.stamppolicy == "perfile":
fulldeptree = False
else:
fulldeptree = True
stampwhitelist = []
if self.stamppolicy == "whitelist":
- stampwhitelist = self.rqdata.stampfnwhitelist
+ stampwhitelist = self.rqdata.stampfnwhitelist[mc]
- fn = fn_from_tid(tid)
- if taskname is None:
- taskname = taskname_from_tid(tid)
-
- stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
+ stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
# If the stamp is missing, it's not current
if not os.access(stampfile, os.F_OK):
logger.debug(2, "Stampfile %s not available", stampfile)
return False
# If it's a 'nostamp' task, it's not current
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
return False
t1 = get_timestamp(stampfile)
for dep in self.rqdata.runtaskentries[tid].depends:
if iscurrent:
- fn2 = fn_from_tid(dep)
- taskname2 = taskname_from_tid(dep)
- stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
- stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCache, fn2)
+ (mc2, fn2, taskname2) = split_tid(dep)
+ taskfn2 = taskfn_fromtid(dep)
+ stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
+ stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
t2 = get_timestamp(stampfile2)
t3 = get_timestamp(stampfile3)
if t3 and not t2:
logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
if self.state is runQueueFailed:
- if not self.rqdata.taskData.tryaltconfigs:
- raise bb.runqueue.TaskFailure(self.rqexe.failed_fns)
- for fn in self.rqexe.failed_fns:
- self.rqdata.taskData.fail_fn(fn)
+ if not self.rqdata.taskData[''].tryaltconfigs:
+ raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
+ for tid in self.rqexe.failed_tids:
+ (mc, fn, tn) = split_tid(tid)
+ self.rqdata.taskData[mc].fail_fn(fn)
self.rqdata.reset()
if self.state is runQueueComplete:
def dump_signatures(self, options):
done = set()
bb.note("Reparsing files to collect dependency data")
+ bb_cache = bb.cache.NoCache(self.cooker.databuilder)
for tid in self.rqdata.runtaskentries:
- fn = fn_from_tid(tid)
+ fn = taskfn_fromtid(tid)
if fn not in done:
- the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn), self.cooker.data)
+ the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
done.add(fn)
- bb.parse.siggen.dump_sigs(self.rqdata.dataCache, options)
+ bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
return
valid_new = set()
for tid in self.rqdata.runtaskentries:
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ (mc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
noexec.append(tid)
continue
sq_fn.append(fn)
- sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
+ sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
sq_hash.append(self.rqdata.runtaskentries[tid].hash)
sq_taskname.append(taskname)
sq_task.append(tid)
for tid in invalidtasks:
- fn = fn_from_tid(tid)
- pn = self.rqdata.dataCache.pkg_fn[fn]
- taskname = taskname_from_tid(tid)
+ (mc, fn, taskname) = split_tid(tid)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
h = self.rqdata.runtaskentries[tid].hash
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
match = None
self.build_stamps = {}
self.build_stamps2 = []
- self.failed_fns = []
+ self.failed_tids = []
self.stampcache = {}
# worker must have died?
pass
- if len(self.failed_fns) != 0:
+ if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed
return
self.rq.read_workers()
return self.rq.active_fds()
- if len(self.failed_fns) != 0:
+ if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed
return True
taskdata = {}
taskdeps.add(task)
for dep in taskdeps:
- fn = fn_from_tid(dep)
- pn = self.rqdata.dataCache.pkg_fn[fn]
- taskname = taskname_from_tid(dep)
+ (mc, fn, taskname) = split_tid(dep)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
taskdata[dep] = [pn, taskname, fn]
call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
tasknames = {}
fns = {}
for tid in self.rqdata.runtaskentries:
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
- taskdep = self.rqdata.dataCache.task_deps[fn]
- fns[tid] = fn
+ (mc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
+ fns[tid] = taskfn
tasknames[tid] = taskname
if 'noexec' in taskdep and taskname in taskdep['noexec']:
continue
covered_remove = bb.utils.better_eval(call, locs)
def removecoveredtask(tid):
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid) + '_setscene'
- bb.build.del_stamp(taskname, self.rqdata.dataCache, fn)
+ (mc, fn, taskname) = split_tid(tid)
+ taskname = taskname + '_setscene'
+ taskfn = taskfn_fromtid(tid)
+ bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
self.rq.scenequeue_covered.remove(tid)
toremove = covered_remove
logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
- event.fire(bb.event.StampUpdate(self.rqdata.target_pairs, self.rqdata.dataCache.stamp), self.cfgData)
+
+ for mc in self.rqdata.dataCaches:
+ target_pairs = []
+ for tid in self.rqdata.target_tids:
+ (tidmc, fn, taskname) = split_tid(tid)
+ if tidmc == mc:
+ target_pairs.append((fn, taskname))
+
+ event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
schedulers = self.get_schedulers()
for scheduler in schedulers:
Updates the state engine with the failure
"""
self.stats.taskFailed()
- fn = fn_from_tid(task)
- self.failed_fns.append(fn)
+ self.failed_tids.append(task)
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
- if self.rqdata.taskData.abort:
+ if self.rqdata.taskData[''].abort:
self.rq.state = runQueueCleanUp
def task_skip(self, task, reason):
if self.rqdata.setscenewhitelist:
# Check tasks that are going to run against the whitelist
def check_norun_task(tid, showerror=False):
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
+ (mc, fn, taskname) = split_tid(tid)
# Ignore covered tasks
if tid in self.rq.scenequeue_covered:
return False
if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
return False
# Ignore noexec tasks
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
return False
- pn = self.rqdata.dataCache.pkg_fn[fn]
+ pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
if showerror:
if tid in self.rqdata.runq_setscene_tids:
task = self.sched.next()
if task is not None:
- fn = fn_from_tid(task)
- taskname = taskname_from_tid(task)
+ (mc, fn, taskname) = split_tid(task)
+ taskfn = taskfn_fromtid(task)
if task in self.rq.scenequeue_covered:
logger.debug(2, "Setscene covered task %s", task)
self.task_skip(task, "existing")
return True
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
startevent = runQueueTaskStarted(task, self.stats, self.rq,
noexec=True)
self.runq_running.add(task)
self.stats.taskActive()
if not self.cooker.configuration.dry_run:
- bb.build.make_stamp(taskname, self.rqdata.dataCache, fn)
+ bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
self.task_complete(task)
return True
else:
taskdepdata = self.build_taskdepdata(task)
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
if not self.rq.fakeworker:
try:
logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
self.rq.state = runQueueFailed
return True
- self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
- self.rq.fakeworker[''].process.stdin.flush()
+ self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+ self.rq.fakeworker[mc].process.stdin.flush()
else:
- self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata)) + b"</runtask>")
- self.rq.worker[''].process.stdin.flush()
+ self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
+ self.rq.worker[mc].process.stdin.flush()
- self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
+ self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
self.build_stamps2.append(self.build_stamps[task])
self.runq_running.add(task)
self.stats.taskActive()
self.rq.read_workers()
return self.rq.active_fds()
- if len(self.failed_fns) != 0:
+ if len(self.failed_tids) != 0:
self.rq.state = runQueueFailed
return True
while next:
additional = []
for revdep in next:
- fn = fn_from_tid(revdep)
- pn = self.rqdata.dataCache.pkg_fn[fn]
- taskname = taskname_from_tid(revdep)
+ (mc, fn, taskname) = split_tid(revdep)
+ taskfn = taskfn_fromtid(revdep)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
deps = self.rqdata.runtaskentries[revdep].depends
- provides = self.rqdata.dataCache.fn_provides[fn]
+ provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
for revdep2 in deps:
if revdep2 not in taskdepdata:
# e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
# Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
for tid in self.rqdata.runq_setscene_tids:
- realtid = tid + "_setscene"
- idepends = self.rqdata.taskData.taskentries[realtid].idepends
+ (mc, fn, taskname) = split_tid(tid)
+ realtid = fn + ":" + taskname + "_setscene"
+ idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
for (depname, idependtask) in idepends:
- if depname not in self.rqdata.taskData.build_targets:
+ if depname not in self.rqdata.taskData[mc].build_targets:
continue
- depfn = self.rqdata.taskData.build_targets[depname][0]
+ depfn = self.rqdata.taskData[mc].build_targets[depname][0]
if depfn is None:
continue
deptid = depfn + ":" + idependtask.replace("_setscene", "")
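+ # depfn comes from build_targets, so it already carries any multiconfig
+ # prefix and deptid forms a complete runqueue tid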
noexec = []
stamppresent = []
for tid in self.sq_revdeps:
- fn = fn_from_tid(tid)
- taskname = taskname_from_tid(tid)
+ (mc, fn, taskname) = split_tid(tid)
+ taskfn = taskfn_fromtid(tid)
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
noexec.append(tid)
self.task_skip(tid)
- bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
+ bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
continue
if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
continue
sq_fn.append(fn)
- sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
+ sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
sq_hash.append(self.rqdata.runtaskentries[tid].hash)
sq_taskname.append(taskname)
sq_task.append(tid)
def check_taskfail(self, task):
if self.rqdata.setscenewhitelist:
realtask = task.split('_setscene')[0]
- fn = fn_from_tid(realtask)
- taskname = taskname_from_tid(realtask)
- pn = self.rqdata.dataCache.pkg_fn[fn]
+ (mc, fn, taskname) = split_tid(realtask)
+ pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
self.rq.state = runQueueCleanUp
if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
- fn = fn_from_tid(nexttask)
foundtarget = False
- for target in self.rqdata.target_pairs:
- if target[0] == fn and target[1] == taskname_from_tid(nexttask):
- foundtarget = True
- break
+
+ if nexttask in self.rqdata.target_tids:
+ foundtarget = True
if not foundtarget:
logger.debug(2, "Skipping setscene for task %s" % nexttask)
self.task_skip(nexttask)
task = nexttask
break
if task is not None:
- fn = fn_from_tid(task)
- taskname = taskname_from_tid(task) + "_setscene"
+ (mc, fn, taskname) = split_tid(task)
+ taskfn = taskfn_fromtid(task)
+ taskname = taskname + "_setscene"
if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
self.task_failoutright(task)
return True
if self.cooker.configuration.force:
- for target in self.rqdata.target_pairs:
- if target[0] == fn and target[1] == taskname_from_tid(task):
- self.task_failoutright(task)
- return True
+ if task in self.rqdata.target_tids:
+ self.task_failoutright(task)
+ return True
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
bb.event.fire(startevent, self.cfgData)
- taskdep = self.rqdata.dataCache.task_deps[fn]
+ taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
if not self.rq.fakeworker:
self.rq.start_fakeworker(self)
- self.rq.fakeworker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>")
- self.rq.fakeworker[''].process.stdin.flush()
+ self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
+ self.rq.fakeworker[mc].process.stdin.flush()
else:
- self.rq.worker[''].process.stdin.write(b"<runtask>" + pickle.dumps((fn, task, taskname, True, self.cooker.collection.get_file_appends(fn), None)) + b"</runtask>")
- self.rq.worker[''].process.stdin.flush()
+ self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
+ self.rq.worker[mc].process.stdin.flush()
self.runq_running.add(task)
self.stats.taskActive()