Skip to content
This repository was archived by the owner on Jan 27, 2019. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions classes/chrpath.oeclass
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ CHRPATH_REPLACE_STAGEDIRS:cross = "1"
CHRPATH_REPLACE_STAGEDIRS:sdk-cross = "1"

do_chrpath[dirs] = "${D}"
do_chrpath[__async] = "${__ASYNC_CHRPATH}"
def do_chrpath(d):
import stat
import oelite.magiccache
Expand Down
2 changes: 2 additions & 0 deletions classes/fetch.oeclass
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ def fetch_init(d):

do_fetch[dirs] = "${INGREDIENTS}"

do_fetch[__async] = "${__ASYNC_FETCH}"
def do_fetch(d):
sigfile_changed = False
for uri in d.get("__fetch"):
Expand All @@ -91,6 +92,7 @@ def do_fetch(d):
do_unpack[dirs] = "${SRCDIR}"
do_unpack[cleandirs] = "${SRCDIR}"

do_unpack[__async] = "${__ASYNC_UNPACK}"
def do_unpack(d):
for uri in d.get("__fetch"):
if "unpack" in uri.params:
Expand Down
2 changes: 2 additions & 0 deletions classes/package.oeclass
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ do_split[dirs] = "${PKGD} ${D}"
# The FILES_* and ALLOW_EMPTY_* variables are exclusive for do_split task
META_EMIT_PREFIX += "split:FILES_${PN} split:ALLOW_EMPTY"

do_split[__async] = "${__ASYNC_SPLIT}"
def do_split(d):
import errno, stat

Expand Down Expand Up @@ -151,6 +152,7 @@ LICENSE[emit] = "do_package"
HOMEPAGE[emit] = "do_package"
MAINTAINER[emit] = "do_package"

do_package[__async] = "${__ASYNC_PACKAGE}"
def do_package(d):
import bb, os

Expand Down
1 change: 1 addition & 0 deletions classes/stage.oeclass
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ do_stage[dirs] = "${STAGE_DIR}"
do_stage[recdeptask] = "DEPENDS:do_package"

do_stage[import] = "set_stage"
do_stage[__async] = "${__ASYNC_STAGE}"
def do_stage(d):
def get_dstdir(cwd, package):
return os.path.join(cwd, package.type)
Expand Down
95 changes: 95 additions & 0 deletions lib/oelite/function.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import warnings
import re
import subprocess
import traceback

class OEliteFunction(object):

Expand Down Expand Up @@ -68,6 +69,22 @@ def start(self, cwd):
self.result = True


# Making a PythonFunction run asynchronously is not that easy:
#
# (1) we cannot use threads, since many of the functions
# (e.g. do_fetch, do_unpack) expect to have a specific $CWD, and
# that's a global resource in the process - those functions would fail
# immediately when the main thread chdirs away.
#
# (2) we cannot just fork() and do everything in the child, since some
# PythonFunctions really must mutate state in the main oe process
# (most notably all hook functions that run during and immediately
# after recipe parsing).
#
# (3) even if we do (2) on an opt-in basis, I'm not entirely convinced
# we never rely on e.g. do_unpack changing that task's
# metadata. Nevertheless, this is what we'll try to do.

class PythonFunction(OEliteFunction):

def __init__(self, meta, var, name=None, tmpdir=None, recursion_path=None,
Expand Down Expand Up @@ -99,9 +116,87 @@ def __init__(self, meta, var, name=None, tmpdir=None, recursion_path=None,
self.function = l[var]
self.set_os_environ = set_os_environ
self.result = False
self.async = bool(int(meta.get_flag(var, "__async", expand=oelite.meta.CLEAN_EXPANSION) or 0))
super(PythonFunction, self).__init__(meta, var, name, tmpdir)
return

    def _start(self):
        # Run the function, either inline (synchronous) or in a forked
        # child process (asynchronous, opted in per-task via the
        # __async variable flag parsed in __init__).
        #
        # Synchronous case: execute now and record the boolean result;
        # wait() will simply return it.
        if not self.async:
            self.result = self()
            return
        # prevent duplicate output from stdio buffers: anything still
        # buffered would otherwise be flushed by BOTH parent and child
        # after the fork
        sys.stdout.flush()
        sys.stderr.flush()

        self.childpid = os.fork()
        # This raises OSError on error, so there's no < 0 case to consider.
        if self.childpid > 0:
            # parent: return immediately; wait() reaps the child and
            # turns its exit status into the task result
            return

        # child

        # If there's an exception, we want to get as much info as
        # possible printed, not just the stringification of the
        # exception object itself. The traceback module "exactly
        # mimics the behavior of the Python interpreter when it prints
        # a stack trace".

        # We can only tell our parent how it went via our exit
        # code. Important: We cannot call sys.exit(), since that is
        # implemented by raising SystemExit, and we really must not
        # return from this function - otherwise we go all the way back
        # to the main loop in baker.py, get caught by the try-finally
        # block, which then triggers the "wait for remaining tasks"
        # logic, and we fail miserably since we do not have the child
        # being waited for (that's us!). So we use
        # os._exit(). However, we then need to ensure proper buffer
        # flushing etc. manually.
        #
        # Exit code protocol: 0 = success, 1 = function returned a
        # falsy value, 2 = function raised.
        exitcode = 0
        try:
            ret = self()
            if not ret:
                exitcode = 1
        except:
            traceback.print_exc()
            exitcode = 2
        # We don't want any silly error during what should be the
        # proper way to shutdown manually to interfere with the exit
        # code.
        try:
            sys.stdout.flush()
            sys.stderr.flush()
            # What else do we need to do?
        finally:
            os._exit(exitcode)
        assert(0) # not reached

    def wait(self, poll=False):
        # Collect the outcome of a previously _start()'ed function.
        #
        # poll: if True, do a non-blocking check (os.WNOHANG) and
        #       return None when the child has not finished yet.
        # Returns True on success, False on failure (nonzero exit,
        # signal death, or other abnormal status), None only when
        # polling and the child is still running.
        #
        # Synchronous functions already stored their boolean result in
        # _start(); just hand it back.
        if not self.async:
            assert(self.result is True or self.result is False)
            return self.result

        flags = 0
        if poll:
            flags = os.WNOHANG

        # NOTE(review): waitpid() reaps the child, so calling wait()
        # a second time after a completed reap would raise OSError
        # (ECHILD) - presumably callers only call this once per task;
        # verify against the scheduler.
        pid, status = os.waitpid(self.childpid, flags)
        if not pid:
            # This should only happen if we passed WNOHANG.
            assert(poll)
            return None

        assert(pid == self.childpid)
        # Decode the raw waitpid() status; exit code 0 is the only
        # success case (see the exit-code protocol in _start()).
        if os.WIFEXITED(status):
            if os.WEXITSTATUS(status) == 0:
                return True
            print "forked python process exited with status %d" % os.WEXITSTATUS(status)
        elif os.WIFSIGNALED(status):
            print "forked python process killed from signal %d" % os.WTERMSIG(status)
        else:
            print "forked python process died for unknown reason (%d)" % status
        return False

def __call__(self):

if self.set_os_environ:
Expand Down