Diffstat (limited to 'meta/classes')
 meta/classes/autotools.bbclass               |  42
 meta/classes/base.bbclass                    | 168
 meta/classes/binconfig.bbclass               |   6
 meta/classes/bootimg.bbclass                 |   4
 meta/classes/cpan-base.bbclass               |   4
 meta/classes/cpan.bbclass                    |   5
 meta/classes/cpan_build.bbclass              |   4
 meta/classes/cross-canadian.bbclass          |   7
 meta/classes/cross.bbclass                   |  27
 meta/classes/crosssdk.bbclass                |   4
 meta/classes/distrodata.bbclass              | 110
 meta/classes/distutils-common-base.bbclass   |  23
 meta/classes/distutils.bbclass               |  20
 meta/classes/gtk-icon-cache.bbclass          |   2
 meta/classes/image.bbclass                   |   7
 meta/classes/imagetest-qemu.bbclass          |  52
 meta/classes/insane.bbclass                  |  32
 meta/classes/kernel-yocto.bbclass            | 112
 meta/classes/kernel.bbclass                  |  23
 meta/classes/libc-package.bbclass            |   3
 meta/classes/license.bbclass                 | 103
 meta/classes/native.bbclass                  |   5
 meta/classes/nativesdk.bbclass               |   5
 meta/classes/package.bbclass                 |  22
 meta/classes/package_deb.bbclass             | 154
 meta/classes/package_ipk.bbclass             |  70
 meta/classes/package_rpm.bbclass             | 257
 meta/classes/patch.bbclass                   |   4
 meta/classes/poky-autobuild-notifier.bbclass |   6
 meta/classes/poky-image.bbclass              |   5
 meta/classes/poky.bbclass                    |  29
 meta/classes/populate_sdk.bbclass            |  82
 meta/classes/populate_sdk_deb.bbclass        |  60
 meta/classes/populate_sdk_ipk.bbclass        |  44
 meta/classes/populate_sdk_rpm.bbclass        |  80
 meta/classes/python-dir.bbclass              |   2
 meta/classes/qmake2.bbclass                  |   3
 meta/classes/qt4e.bbclass                    |  18
 meta/classes/qt4x11.bbclass                  |   9
 meta/classes/rm_work.bbclass                 |  34
 meta/classes/rootfs_deb.bbclass              | 111
 meta/classes/rootfs_ipk.bbclass              |  47
 meta/classes/rootfs_rpm.bbclass              | 179
 meta/classes/sanity.bbclass                  |  32
 meta/classes/setuptools.bbclass              |   8
 meta/classes/siteconfig.bbclass              |  15
 meta/classes/sstate.bbclass                  | 168
 meta/classes/staging.bbclass                 |  56
 meta/classes/tinderclient.bbclass            |   6
 meta/classes/toolchain-scripts.bbclass       |  34
 meta/classes/update-rc.d.bbclass             |   8
 meta/classes/utility-tasks.bbclass           |  31
 meta/classes/utils.bbclass                   |  26
 53 files changed, 1458 insertions(+), 910 deletions(-)
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 324cbff47..d43ae6d88 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -38,6 +38,12 @@ def autotools_set_crosscompiling(d):
return " cross_compiling=yes"
return ""
+def append_libtool_sysroot(d):
+ # Only supply libtool sysroot option for non-native packages
+ if not bb.data.inherits_class('native', d):
+ return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
+ return ""
+
# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
CONFIGUREOPTS = " --build=${BUILD_SYS} \
@@ -56,14 +62,13 @@ CONFIGUREOPTS = " --build=${BUILD_SYS} \
--includedir=${includedir} \
--oldincludedir=${oldincludedir} \
--infodir=${infodir} \
- --mandir=${mandir}"
+ --mandir=${mandir} \
+ ${@append_libtool_sysroot(d)}"
oe_runconf () {
if [ -x ${S}/configure ] ; then
cfgcmd="${S}/configure \
- ${CONFIGUREOPTS} \
- ${EXTRA_OECONF} \
- $@"
+ ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
oenote "Running $cfgcmd..."
$cfgcmd || oefatal "oe_runconf failed"
else
@@ -104,9 +109,12 @@ autotools_do_configure() {
AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
automake --version
echo "AUTOV is $AUTOV"
- install -d ${STAGING_DATADIR}/aclocal
- install -d ${STAGING_DATADIR}/aclocal-$AUTOV
- acpaths="$acpaths -I${STAGING_DATADIR}/aclocal-$AUTOV -I ${STAGING_DATADIR}/aclocal"
+ if [ -d ${STAGING_DATADIR}/aclocal-$AUTOV ]; then
+ acpaths="$acpaths -I${STAGING_DATADIR}/aclocal-$AUTOV"
+ fi
+ if [ -d ${STAGING_DATADIR}/aclocal ]; then
+ acpaths="$acpaths -I ${STAGING_DATADIR}/aclocal"
+ fi
# autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
# like it was auto-generated. Work around this by blowing it away
# by hand, unless the package specifically asked not to run aclocal.
@@ -150,22 +158,10 @@ autotools_do_configure() {
autotools_do_install() {
oe_runmake 'DESTDIR=${D}' install
-}
-
-PACKAGE_PREPROCESS_FUNCS += "autotools_prepackage_lamangler"
-
-autotools_prepackage_lamangler () {
- for i in `find ${PKGD} -name "*.la"` ; do \
- sed -i -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${D}::g;' \
- -e 's:-I${WORKDIR}\S*: :g;' \
- -e 's:-L${WORKDIR}\S*: :g;' \
- $i
- done
-}
-
-autotools_stage_dir() {
- sysroot_stage_dir $1 $2
+ # Info dir listing isn't interesting at this point so remove it if it exists.
+ if [ -e "${D}${infodir}/dir" ]; then
+ rm -f ${D}${infodir}/dir
+ fi
}
inherit siteconfig
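
For a non-native recipe, the two hunks above combine so that oe_runconf now hands configure the libtool sysroot switch; a sketch of the expanded command, with variables left symbolic rather than filled in:

    ${S}/configure --build=${BUILD_SYS} \
        ... \
        --mandir=${mandir} \
        --with-libtool-sysroot=${STAGING_DIR_HOST} \
        ${EXTRA_OECONF}

Native recipes see no --with-libtool-sysroot option at all, since append_libtool_sysroot() returns an empty string for them.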
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index c60048bd5..edb65eb96 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -107,7 +107,7 @@ python base_scenefunction () {
python base_do_setscene () {
for f in (bb.data.getVar('SCENEFUNCS', d, 1) or '').split():
bb.build.exec_func(f, d)
- if not os.path.exists(bb.data.getVar('STAMP', d, 1) + ".do_setscene"):
+ if not os.path.exists(bb.build.stampfile("do_setscene", d)):
bb.build.make_stamp("do_setscene", d)
}
do_setscene[selfstamp] = "1"
@@ -116,146 +116,38 @@ addtask setscene before do_fetch
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
python base_do_fetch() {
- import sys
+
+ src_uri = (bb.data.getVar('SRC_URI', d, True) or "").split()
+ if len(src_uri) == 0:
+ return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
- src_uri = bb.data.getVar('SRC_URI', localdata, 1)
- if not src_uri:
- return 1
-
- try:
- bb.fetch.init(src_uri.split(),d)
- except bb.fetch.NoMethodError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("No method: %s" % value)
- except bb.MalformedUrl:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Malformed URL: %s" % value)
-
- try:
- bb.fetch.go(localdata)
- except bb.fetch.MissingParameterError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Missing parameters: %s" % value)
- except bb.fetch.FetchError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Fetch failed: %s" % value)
- except bb.fetch.MD5SumError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("MD5 failed: %s" % value)
- except:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Unknown fetch Error: %s" % value)
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.download()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
-def subprocess_setup():
- import signal
- # Python installs a SIGPIPE handler by default. This is usually not what
- # non-Python subprocesses expect.
- # SIGPIPE errors are known issues with gzip/bash
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-def oe_unpack_file(file, data, url = None):
- import subprocess
- if not url:
- url = "file://%s" % file
- dots = file.split(".")
- if dots[-1] in ['gz', 'bz2', 'Z']:
- efile = os.path.join(bb.data.getVar('WORKDIR', data, 1),os.path.basename('.'.join(dots[0:-1])))
- else:
- efile = file
- cmd = None
- if file.endswith('.tar'):
- cmd = 'tar x --no-same-owner -f %s' % file
- elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
- cmd = 'tar xz --no-same-owner -f %s' % file
- elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
- cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
- elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
- cmd = 'gzip -dc %s > %s' % (file, efile)
- elif file.endswith('.bz2'):
- cmd = 'bzip2 -dc %s > %s' % (file, efile)
- elif file.endswith('.tar.xz'):
- cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
- elif file.endswith('.xz'):
- cmd = 'xz -dc %s > %s' % (file, efile)
- elif file.endswith('.zip') or file.endswith('.jar'):
- cmd = 'unzip -q -o'
- (type, host, path, user, pswd, parm) = bb.decodeurl(url)
- if 'dos' in parm:
- cmd = '%s -a' % cmd
- cmd = "%s '%s'" % (cmd, file)
- elif os.path.isdir(file):
- filesdir = os.path.realpath(bb.data.getVar("FILESDIR", data, 1))
- destdir = "."
- if file[0:len(filesdir)] == filesdir:
- destdir = file[len(filesdir):file.rfind('/')]
- destdir = destdir.strip('/')
- if len(destdir) < 1:
- destdir = "."
- elif not os.access("%s/%s" % (os.getcwd(), destdir), os.F_OK):
- os.makedirs("%s/%s" % (os.getcwd(), destdir))
- cmd = 'cp -pPR %s %s/%s/' % (file, os.getcwd(), destdir)
- else:
- (type, host, path, user, pswd, parm) = bb.decodeurl(url)
- if not 'patch' in parm:
- # The "destdir" handling was specifically done for FILESPATH
- # items. So, only do so for file:// entries.
- if type == "file" and path.find("/") != -1:
- destdir = path.rsplit("/", 1)[0]
- else:
- destdir = "."
- bb.mkdirhier("%s/%s" % (os.getcwd(), destdir))
- cmd = 'cp %s %s/%s/' % (file, os.getcwd(), destdir)
-
- if not cmd:
- return True
-
- dest = os.path.join(os.getcwd(), os.path.basename(file))
- if os.path.exists(dest):
- if os.path.samefile(file, dest):
- return True
-
- # Change to subdir before executing command
- save_cwd = os.getcwd();
- parm = bb.decodeurl(url)[5]
- if 'subdir' in parm:
- newdir = ("%s/%s" % (os.getcwd(), parm['subdir']))
- bb.mkdirhier(newdir)
- os.chdir(newdir)
-
- cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', data, 1), cmd)
- bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
- ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)
-
- os.chdir(save_cwd)
-
- return ret == 0
-
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
python base_do_unpack() {
- import re
+ src_uri = (bb.data.getVar('SRC_URI', d, True) or "").split()
+ if len(src_uri) == 0:
+ return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
- src_uri = bb.data.getVar('SRC_URI', localdata, True)
- if not src_uri:
- return
- for url in src_uri.split():
- try:
- local = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
- except bb.MalformedUrl, e:
- raise FuncFailed('Unable to generate local path for malformed uri: %s' % e)
- if local is None:
- continue
- local = os.path.realpath(local)
- ret = oe_unpack_file(local, localdata, url)
- if not ret:
- raise bb.build.FuncFailed()
+ rootdir = bb.data.getVar('WORKDIR', localdata, True)
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.unpack(rootdir)
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
GIT_CONFIG = "${STAGING_DIR_NATIVE}/usr/etc/gitconfig"
@@ -418,8 +310,9 @@ python () {
commercial_license = bb.data.getVar('COMMERCIAL_LICENSE', d, 1)
import re
- if commercial_license and re.search(pn, commercial_license):
- bb.debug("Skipping %s because it's commercially licensed" % pn)
+ pnr = pn.replace('+', "\+")
+ if commercial_license and re.search(pnr, commercial_license):
+ bb.debug(1, "Skipping %s because it's commercially licensed" % pn)
raise bb.parse.SkipPackage("because it requires commercial license to ship product")
# If we're building a target package we need to use fakeroot (pseudo)
@@ -532,7 +425,8 @@ python () {
for s in srcuri.split():
if not s.startswith("file://"):
continue
- local = bb.data.expand(bb.fetch.localpath(s, d), d)
+ fetcher = bb.fetch2.Fetch([s], d)
+ local = fetcher.localpath(s)
for mp in paths:
if local.startswith(mp):
#bb.note("overriding PACKAGE_ARCH from %s to %s" % (pkg_arch, mach_arch))
@@ -576,14 +470,12 @@ python do_cleanall() {
dl_dir = bb.data.getVar('DL_DIR', localdata, True)
dl_dir = os.path.realpath(dl_dir)
- src_uri = bb.data.getVar('SRC_URI', localdata, True)
- if not src_uri:
+ src_uri = (bb.data.getVar('SRC_URI', localdata, True) or "").split()
+ if len(src_uri) == 0:
return
- for url in src_uri.split():
- try:
- local = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
- except bb.MalformedUrl, e:
- raise FuncFailed('Unable to generate local path for malformed uri: %s' % e)
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ for url in src_uri:
+ local = fetcher.localpath(url)
if local is None:
continue
local = os.path.realpath(local)
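
Both do_fetch and do_unpack above now funnel through the same bb.fetch2 entry point. A minimal sketch of that pattern, using only the calls visible in these hunks (it assumes a BitBake datastore d and keeps the diff's Python 2 exception syntax):

    src_uri = (bb.data.getVar('SRC_URI', d, True) or "").split()
    if src_uri:
        try:
            fetcher = bb.fetch2.Fetch(src_uri, d)
            fetcher.download()                                  # replaces bb.fetch.go()
            fetcher.unpack(bb.data.getVar('WORKDIR', d, True))  # replaces oe_unpack_file()
        except bb.fetch2.BBFetchException, e:
            raise bb.build.FuncFailed(e)

The per-exception handling of the old API (NoMethodError, MalformedUrl, FetchError, MD5SumError) collapses into the single BBFetchException base class.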
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index 73ca4d621..8e22d2d29 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -47,8 +47,8 @@ SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
binconfig_sysroot_preprocess () {
for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
configname=`basename $config`
- install -d ${SYSROOT_DESTDIR}${STAGING_BINDIR_CROSS}
- cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${STAGING_BINDIR_CROSS}/$configname
- chmod u+x ${SYSROOT_DESTDIR}${STAGING_BINDIR_CROSS}/$configname
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+ chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
done
}
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
index f4949f55b..49ee85ea7 100644
--- a/meta/classes/bootimg.bbclass
+++ b/meta/classes/bootimg.bbclass
@@ -49,7 +49,7 @@ inherit syslinux
build_boot_bin() {
install -d ${HDDDIR}
- install -m 0644 ${STAGING_DIR}/${MACHINE}${HOST_VENDOR}-${HOST_OS}/kernel/bzImage \
+ install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage \
${HDDDIR}/vmlinuz
if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
@@ -83,7 +83,7 @@ build_boot_bin() {
# Install the kernel
- install -m 0644 ${STAGING_DIR}/${MACHINE}${HOST_VENDOR}-${HOST_OS}/kernel/bzImage \
+ install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage \
${ISODIR}/vmlinuz
# Install the configuration files
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index 1d13ff3f0..6cd2aa340 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,7 +2,7 @@
# cpan-base provides various perl-related information needed for building
# cpan modules
#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
+FILES_${PN} += "${libdir}/perl ${datadir}/perl"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
@@ -27,7 +27,7 @@ def get_perl_version(d):
# Determine where the library directories are
def perl_get_libdirs(d):
libdir = bb.data.getVar('libdir', d, 1)
- libdirs = libdir + '/perl5'
+ libdirs = libdir + '/perl'
return libdirs
def is_target(d):
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index 513f0b3ce..1cfd0221d 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -4,18 +4,19 @@
inherit cpan-base
EXTRA_CPANFLAGS ?= ""
+EXTRA_PERLFLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
# Env var which tells perl where the perl include files are
export PERL_INC = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}/CORE"
-export PERL_LIB = "${STAGING_DATADIR}/perl/${@get_perl_version(d)}"
+export PERL_LIB = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}"
export PERL_ARCHLIB = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}"
cpan_do_configure () {
export PERL5LIB="${PERL_ARCHLIB}"
- yes '' | perl Makefile.PL ${EXTRA_CPANFLAGS}
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
. ${STAGING_LIBDIR}/perl/config.sh
# Use find since there can be a Makefile generated for each Makefile.PL
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
index dd8d5434d..cc503a424 100644
--- a/meta/classes/cpan_build.bbclass
+++ b/meta/classes/cpan_build.bbclass
@@ -26,8 +26,8 @@ cpan_build_do_configure () {
perl Build.PL --installdirs vendor \
--destdir ${D} \
- --install_path lib="${datadir}/perl5" \
- --install_path arch="${libdir}/perl5" \
+ --install_path lib="${datadir}/perl" \
+ --install_path arch="${libdir}/perl" \
--install_path script=${bindir} \
--install_path bin=${bindir} \
--install_path bindoc=${mandir}/man1 \
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
index 9edce31cb..1a045ba16 100644
--- a/meta/classes/cross-canadian.bbclass
+++ b/meta/classes/cross-canadian.bbclass
@@ -15,6 +15,7 @@ STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${S
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
OLD_MULTIMACH_ARCH := "${MULTIMACH_ARCH}"
+OLD_MULTIMACH_TARGET_SYS := "${MULTIMACH_TARGET_SYS}"
OLD_PACKAGE_ARCH := ${BASE_PACKAGE_ARCH}
BASE_PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
python () {
@@ -29,7 +30,6 @@ MULTIMACH_TARGET_SYS = "${MULTIMACH_ARCH}${HOST_VENDOR}-${HOST_OS}"
INHIBIT_DEFAULT_DEPS = "1"
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-nativesdk"
-STAGING_DIR_TARGET = "${STAGING_DIR}/${OLD_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
@@ -43,6 +43,9 @@ HOST_OS = "${SDK_OS}"
HOST_PREFIX = "${SDK_PREFIX}"
HOST_CC_ARCH = "${SDK_CC_ARCH}"
+#assign DPKG_ARCH
+DPKG_ARCH = "${SDK_ARCH}"
+
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
CXXFLAGS = "${BUILDSDK_CFLAGS}"
@@ -81,3 +84,5 @@ export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
# Cross-canadian packages need to pull in nativesdk dynamic libs
SHLIBSDIR = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${BUILD_OS}/shlibs"
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index 7c20be0b8..f9fd07a9a 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -20,6 +20,11 @@ HOST_OS = "${BUILD_OS}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+STAGING_DIR_HOST = "${STAGING_DIR}/${BASEPKG_HOST_SYS}"
+
+export PKG_CONFIG_DIR = "${STAGING_DIR}/${BASE_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${BASE_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CFLAGS}"
@@ -40,30 +45,20 @@ target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
# Overrides for paths
+CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
base_prefix = "${STAGING_DIR_NATIVE}"
exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-bindir = "${exec_prefix}/bin/${MULTIMACH_TARGET_SYS}"
+bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
sbindir = "${bindir}"
base_bindir = "${bindir}"
base_sbindir = "${bindir}"
-libdir = "${exec_prefix}/lib/${MULTIMACH_TARGET_SYS}"
-libexecdir = "${exec_prefix}/libexec/${MULTIMACH_TARGET_SYS}"
+libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
+libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
+
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
do_install () {
oe_runmake 'DESTDIR=${D}' install
}
-#
-# Override the default sysroot staging copy since this won't look like a target system
-#
-sysroot_stage_all() {
- sysroot_stage_dir ${D} ${SYSROOT_DESTDIR}
- install -d ${SYSROOT_DESTDIR}${STAGING_DIR_TARGET}${target_base_libdir}/
- install -d ${SYSROOT_DESTDIR}${STAGING_DIR_TARGET}${target_libdir}/
- mv ${SYSROOT_DESTDIR}${target_base_libdir}/* ${SYSROOT_DESTDIR}${STAGING_DIR_TARGET}${target_base_libdir}/ || true
- mv ${SYSROOT_DESTDIR}${target_libdir}/* ${SYSROOT_DESTDIR}${STAGING_DIR_TARGET}${target_libdir}/ || true
-}
-
-
-
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
index 23db163ba..75fcfefc0 100644
--- a/meta/classes/crosssdk.bbclass
+++ b/meta/classes/crosssdk.bbclass
@@ -1,7 +1,5 @@
inherit cross
-PACKAGES = ""
-
BASE_PACKAGE_ARCH = "${SDK_ARCH}"
PACKAGE_ARCH = "${BASE_PACKAGE_ARCH}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}"
@@ -18,3 +16,5 @@ target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
target_exec_prefix = "${SDKPATHNATIVE}${exec_prefix_nativesdk}"
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
index 0378790f7..31e7420cf 100644
--- a/meta/classes/distrodata.bbclass
+++ b/meta/classes/distrodata.bbclass
@@ -3,8 +3,6 @@ require conf/distro/include/distro_tracking_fields.inc
addhandler distro_eventhandler
python distro_eventhandler() {
- from bb.event import Handled, NotHandled
- # if bb.event.getName(e) == "TaskStarted":
if bb.event.getName(e) == "BuildStarted":
"""initialize log files."""
@@ -25,7 +23,7 @@ python distro_eventhandler() {
f.close()
bb.utils.unlockfile(lf)
- return NotHandled
+ return
}
addtask distrodata_np
@@ -213,9 +211,6 @@ do_distrodataall() {
addhandler checkpkg_eventhandler
python checkpkg_eventhandler() {
- from bb.event import Handled, NotHandled
- # if bb.event.getName(e) == "TaskStarted":
-
if bb.event.getName(e) == "BuildStarted":
"""initialize log files."""
logpath = bb.data.getVar('LOG_DIR', e.data, 1)
@@ -234,8 +229,15 @@ python checkpkg_eventhandler() {
f.write("Package\tOwner\tURI Type\tVersion\tTracking\tUpstream\tTMatch\tRMatch\n")
f.close()
bb.utils.unlockfile(lf)
-
- return NotHandled
+ """initialize log files for package report system"""
+ logfile2 = os.path.join(logpath, "get_pkg_info.%s.log" % bb.data.getVar('DATETIME', e.data, 1))
+ if not os.path.exists(logfile2):
+ slogfile2 = os.path.join(logpath, "get_pkg_info.log")
+ if os.path.exists(slogfile2):
+ os.remove(slogfile2)
+ os.system("touch %s" % logfile2)
+ os.symlink(logfile2, slogfile2)
+ return
}
addtask checkpkg
@@ -262,8 +264,8 @@ python do_checkpkg() {
prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*[\-_]" # match most patterns which uses "-" as separator to version digits
prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
prefix = "(%s|%s)" % (prefix1, prefix2)
- suffix = "(tar\.gz|tgz|tar\.bz2|zip)"
- suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2")
+ suffix = "(tar\.gz|tgz|tar\.bz2|zip|xz)"
+ suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz")
sinterstr = "(?P<name>%s?)(?P<ver>.*)" % prefix
sdirstr = "(?P<name>%s)(?P<ver>.*)\.(?P<type>%s$)" % (prefix, suffix)
@@ -307,42 +309,31 @@ python do_checkpkg() {
"""
def internal_fetch_wget(url, d, tmpf):
status = "ErrFetchUnknown"
- try:
- """
- Clear internal url cache as it's a temporary check. Not doing so will have
- bitbake check url multiple times when looping through a single url
- """
- fn = bb.data.getVar('FILE', d, 1)
- bb.fetch.urldata_cache[fn] = {}
- bb.fetch.init([url], d)
- except bb.fetch.NoMethodError:
- status = "ErrFetchNoMethod"
- except:
- status = "ErrInitUrlUnknown"
- else:
- """
- To avoid impacting bitbake build engine, this trick is required for reusing bitbake
- interfaces. bb.fetch.go() is not appliable as it checks downloaded content in ${DL_DIR}
- while we don't want to pollute that place. So bb.fetch.checkstatus() is borrowed here
- which is designed for check purpose but we override check command for our own purpose
- """
- ld = bb.data.createCopy(d)
- bb.data.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s '${URI}'" \
+ """
+ Clear internal url cache as it's a temporary check. Not doing so will have
+ bitbake check the url multiple times when looping through a single url
+ """
+ fn = bb.data.getVar('FILE', d, 1)
+ bb.fetch2.urldata_cache[fn] = {}
+
+ """
+ To avoid impacting the bitbake build engine, this trick is required for reusing bitbake
+ interfaces. bb.fetch.go() is not applicable as it checks downloaded content in ${DL_DIR},
+ which we don't want to pollute. So bb.fetch2.checkstatus() is borrowed here;
+ it is designed for checking, but we override the check command for our own purpose
+ """
+ ld = bb.data.createCopy(d)
+ bb.data.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12\" '${URI}'" \
% tmpf.name, d)
- bb.data.update_data(ld)
-
- try:
- bb.fetch.checkstatus(ld)
- except bb.fetch.MissingParameterError:
- status = "ErrMissParam"
- except bb.fetch.FetchError:
- status = "ErrFetch"
- except bb.fetch.MD5SumError:
- status = "ErrMD5Sum"
- except:
- status = "ErrFetchUnknown"
- else:
- status = "SUCC"
+ bb.data.update_data(ld)
+
+ try:
+ fetcher = bb.fetch2.Fetch([url], ld)
+ fetcher.checkstatus()
+ status = "SUCC"
+ except bb.fetch2.BBFetchException, e:
+ status = "ErrFetch"
+
return status
"""
@@ -425,7 +416,7 @@ python do_checkpkg() {
"""match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
pn1 = re.search("^%s" % prefix, curname).group()
s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
- searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
+ searchstr = "[hH][rR][eE][fF]=\"%s\".*>" % s
reg = re.compile(searchstr)
valid = 0
@@ -443,7 +434,7 @@ python do_checkpkg() {
status = "ErrParseDir"
else:
"""newver still contains a full package name string"""
- status = re.search("(\d+[.\-_])*\d+", newver[1]).group()
+ status = re.search("(\d+[.\-_])*[0-9a-zA-Z]+", newver[1]).group()
elif not len(fhtml):
status = "ErrHostNoDir"
@@ -464,11 +455,23 @@ python do_checkpkg() {
logpath = bb.data.getVar('LOG_DIR', d, 1)
bb.utils.mkdirhier(logpath)
logfile = os.path.join(logpath, "checkpkg.csv")
+ """initialize log files for package report system"""
+ logfile2 = os.path.join(logpath, "get_pkg_info.log")
"""generate package information from .bb file"""
pname = bb.data.getVar('PN', d, 1)
pdesc = bb.data.getVar('DESCRIPTION', d, 1)
pgrp = bb.data.getVar('SECTION', d, 1)
+ pversion = bb.data.getVar('PV', d, 1)
+ plicense = bb.data.getVar('LICENSE',d,1)
+ psection = bb.data.getVar('SECTION',d,1)
+ phome = bb.data.getVar('HOMEPAGE', d, 1)
+ prelease = bb.data.getVar('PR',d,1)
+ ppriority = bb.data.getVar('PRIORITY',d,1)
+ pdepends = bb.data.getVar('DEPENDS',d,1)
+ pbugtracker = bb.data.getVar('BUGTRACKER',d,1)
+ ppe = bb.data.getVar('PE',d,1)
+ psrcuri = bb.data.getVar('SRC_URI',d,1)
found = 0
for uri in src_uri.split():
@@ -614,6 +617,14 @@ python do_checkpkg() {
(pname, maintainer, pproto, pcurver, pmver, pupver, pmstatus, pstatus))
f.close()
bb.utils.unlockfile(lf)
+
+ """write into get_pkg_info log file to supply data for package report system"""
+ lf2 = bb.utils.lockfile(logfile2 + ".lock")
+ f2 = open(logfile2, "a")
+ f2.write("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n" % \
+ (pname,pversion,pupver,plicense,psection, phome,prelease, ppriority,pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri))
+ f2.close()
+ bb.utils.unlockfile(lf2)
}
addtask checkpkgall after do_checkpkg
@@ -625,9 +636,6 @@ do_checkpkgall() {
#addhandler check_eventhandler
python check_eventhandler() {
- from bb.event import Handled, NotHandled
- # if bb.event.getName(e) == "TaskStarted":
-
if bb.event.getName(e) == "BuildStarted":
import oe.distro_check as dc
tmpdir = bb.data.getVar('TMPDIR', e.data, 1)
@@ -645,7 +653,7 @@ python check_eventhandler() {
os.symlink(logfile, slogfile)
bb.data.setVar('LOG_FILE', logfile, e.data)
- return NotHandled
+ return
}
addtask distro_check
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
index 01bf9eaeb..f66a5cd57 100644
--- a/meta/classes/distutils-common-base.bbclass
+++ b/meta/classes/distutils-common-base.bbclass
@@ -1,19 +1,10 @@
+inherit python-dir
+
EXTRA_OEMAKE = ""
export STAGING_INCDIR
export STAGING_LIBDIR
-def python_dir(d):
- import os, bb
- staging_incdir = bb.data.getVar( "STAGING_INCDIR", d, 1 )
- for majmin in "2.6 2.5 2.4 2.3".split():
- if os.path.exists( "%s/python%s" % ( staging_incdir, majmin ) ): return "python%s" % majmin
- if not "python-native" in bb.data.getVar( "DEPENDS", d, 1 ).split():
- raise "No Python in STAGING_INCDIR. Forgot to build python-native ?"
- return "INVALID"
-
-PYTHON_DIR = "${@python_dir(d)}"
-
PACKAGES = "${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
@@ -21,10 +12,10 @@ FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
FILES_${PN}-dev += "\
${datadir}/pkgconfig \
${libdir}/pkgconfig \
- ${libdir}/${PYTHON_DIR}/site-packages/*.la \
+ ${PYTHON_SITEPACKAGES_DIR}/*.la \
"
-FILES_${PN}-dbg = "\
- ${libdir}/${PYTHON_DIR}/site-packages/.debug \
- ${libdir}/${PYTHON_DIR}/site-packages/*/.debug \
- ${libdir}/${PYTHON_DIR}/site-packages/*/*/.debug \
+FILES_${PN}-dbg += "\
+ ${PYTHON_SITEPACKAGES_DIR}/.debug \
+ ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
+ ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
"
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
index 245ef7d85..7e6fbc0c0 100644
--- a/meta/classes/distutils.bbclass
+++ b/meta/classes/distutils.bbclass
@@ -16,9 +16,7 @@ distutils_do_compile() {
}
distutils_stage_headers() {
- install -d ${STAGING_DIR_HOST}${libdir}/${PYTHON_DIR}/site-packages
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
oefatal "python setup.py install_headers execution failed."
@@ -27,18 +25,18 @@ distutils_stage_headers() {
distutils_stage_all() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- install -d ${STAGING_DIR_HOST}${libdir}/${PYTHON_DIR}/site-packages
- PYTHONPATH=${STAGING_DIR_HOST}${libdir}/${PYTHON_DIR}/site-packages \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
oefatal "python setup.py install (stage) execution failed."
}
distutils_do_install() {
- install -d ${D}${libdir}/${PYTHON_DIR}/site-packages
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}/${libdir}/${PYTHON_DIR}/site-packages \
+ PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_INSTALL_ARGS} || \
oefatal "python setup.py install execution failed."
@@ -59,15 +57,17 @@ distutils_do_install() {
done
fi
- rm -f ${D}${libdir}/${PYTHON_DIR}/site-packages/easy-install.pth
-
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+
#
# FIXME: Bandaid against wrong datadir computation
#
if test -e ${D}${datadir}/share; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
fi
-
+
+ # These are generated files; on really slow systems the storage/speed trade-off
+ # might be worth it, but in general it isn't
find ${D}${libdir}/${PYTHON_DIR}/site-packages -iname '*.pyo' -exec rm {} \;
}
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index 524c2f0c4..dcabaf544 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -9,7 +9,7 @@ if [ "x$D" != "x" ]; then
fi
# Update the pixbuf loaders in case they haven't been registered yet
-gdk-pixbuf-query-loaders > /etc/gtk-2.0/gdk-pixbuf.loaders
+GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 9fa0155c8..14de3e199 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -39,7 +39,7 @@ python () {
# If we don't do this we try and run the mapping hooks while parsing which is slow
# bitbake should really provide something to let us know this...
- if bb.data.getVar('__RUNQUEUE_DO_NOT_USE_EXTERNALLY', d, True) is not None:
+ if bb.data.getVar('BB_WORKERCONTEXT', d, True) is not None:
runtime_mapping_rename("PACKAGE_INSTALL", d)
runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", d)
}
@@ -155,7 +155,6 @@ insert_feed_uris () {
}
log_check() {
- set +x
for target in $*
do
lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
@@ -164,14 +163,12 @@ log_check() {
if test -e "$lf_path"
then
- rootfs_${IMAGE_PKGTYPE}_log_check $target $lf_path
+ ${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
fi
echo "Logfile is clean"
done
-
- set -x
}
# set '*' as the rootpassword so the images
diff --git a/meta/classes/imagetest-qemu.bbclass b/meta/classes/imagetest-qemu.bbclass
index 4b3ddfc36..8301df845 100644
--- a/meta/classes/imagetest-qemu.bbclass
+++ b/meta/classes/imagetest-qemu.bbclass
@@ -5,6 +5,9 @@ TEST_LOG ?= "${LOG_DIR}/qemuimagetests"
TEST_RESULT ?= "${TEST_DIR}/result"
TEST_TMP ?= "${TEST_DIR}/tmp"
TEST_SCEN ?= "sanity"
+TEST_STATUS ?= "${TEST_TMP}/status"
+TARGET_IPSAVE ?= "${TEST_TMP}/target_ip"
+TEST_SERIALIZE ?= "1"
python do_qemuimagetest() {
qemuimagetest_main(d)
@@ -34,10 +37,21 @@ def qemuimagetest_main(d):
machine = bb.data.getVar('MACHINE', d, 1)
pname = bb.data.getVar('PN', d, 1)
+ """function to save test cases running status"""
+ def teststatus(test, status, index, length):
+ test_status = bb.data.getVar('TEST_STATUS', d, 1)
+ if not os.path.exists(test_status):
+ raise bb.build.FuncFailed("No test status file existing under TEST_TMP")
+
+ f = open(test_status, "w")
+ f.write("\t%-15s%-15s%-15s%-15s\n" % ("Case", "Status", "Number", "Total"))
+ f.write("\t%-15s%-15s%-15s%-15s\n" % (case, status, index, length))
+ f.close()
+
"""funtion to run each case under scenario"""
def runtest(scen, case, fulltestpath):
resultpath = bb.data.getVar('TEST_RESULT', d, 1)
- testpath = bb.data.getVar('TEST_DIR', d, 1)
+ tmppath = bb.data.getVar('TEST_TMP', d, 1)
"""initialize log file for testcase"""
logpath = bb.data.getVar('TEST_LOG', d, 1)
@@ -45,20 +59,23 @@ def qemuimagetest_main(d):
caselog = os.path.join(logpath, "%s/log_%s.%s" % (scen, case, bb.data.getVar('DATETIME', d, 1)))
os.system("touch %s" % caselog)
-
"""export TEST_TMP, TEST_RESULT, DEPLOY_DIR and QEMUARCH"""
os.environ["PATH"] = bb.data.getVar("PATH", d, True)
- os.environ["TEST_TMP"] = testpath
+ os.environ["TEST_TMP"] = tmppath
os.environ["TEST_RESULT"] = resultpath
os.environ["DEPLOY_DIR"] = bb.data.getVar("DEPLOY_DIR", d, True)
os.environ["QEMUARCH"] = machine
os.environ["QEMUTARGET"] = pname
os.environ["DISPLAY"] = bb.data.getVar("DISPLAY", d, True)
os.environ["POKYBASE"] = bb.data.getVar("POKYBASE", d, True)
+ os.environ["TOPDIR"] = bb.data.getVar("TOPDIR", d, True)
+ os.environ["TEST_STATUS"] = bb.data.getVar("TEST_STATUS", d, True)
+ os.environ["TARGET_IPSAVE"] = bb.data.getVar("TARGET_IPSAVE", d, True)
+ os.environ["TEST_SERIALIZE"] = bb.data.getVar("TEST_SERIALIZE", d, True)
"""run Test Case"""
bb.note("Run %s test in scenario %s" % (case, scen))
- os.system("%s | tee -a %s" % (fulltestpath, caselog))
+ os.system("%s" % fulltestpath)
"""Generate testcase list in runtime"""
def generate_list(testlist):
@@ -96,6 +113,18 @@ def qemuimagetest_main(d):
list.append((item, casefile, fulltestcase))
return list
+ """Clean tmp folder for testing"""
+ def clean_tmp():
+ tmppath = bb.data.getVar('TEST_TMP', d, 1)
+
+ if os.path.isdir(tmppath):
+ for f in os.listdir(tmppath):
+ tmpfile = os.path.join(tmppath, f)
+ os.remove(tmpfile)
+
+ """Before running testing, clean temp folder first"""
+ clean_tmp()
+
"""check testcase folder and create test log folder"""
testpath = bb.data.getVar('TEST_DIR', d, 1)
bb.utils.mkdirhier(testpath)
@@ -105,7 +134,13 @@ def qemuimagetest_main(d):
tmppath = bb.data.getVar('TEST_TMP', d, 1)
bb.utils.mkdirhier(tmppath)
-
+
+ """initialize test status file"""
+ test_status = bb.data.getVar('TEST_STATUS', d, 1)
+ if os.path.exists(test_status):
+ os.remove(test_status)
+ os.system("touch %s" % test_status)
+
"""initialize result file"""
resultpath = bb.data.getVar('TEST_RESULT', d, 1)
bb.utils.mkdirhier(resultpath)
@@ -128,9 +163,11 @@ def qemuimagetest_main(d):
fulllist = generate_list(testlist)
"""Begin testing"""
- for test in fulllist:
+ for index,test in enumerate(fulllist):
(scen, case, fullpath) = test
+ teststatus(case, "running", index, (len(fulllist) - 1))
runtest(scen, case, fullpath)
+ teststatus(case, "finished", index, (len(fulllist) - 1))
"""Print Test Result"""
ret = 0
@@ -149,6 +186,9 @@ def qemuimagetest_main(d):
bb.note(line)
f.close()
+ """Clean temp files for testing"""
+ clean_tmp()
+
if ret != 0:
raise bb.build.FuncFailed("Some testcases fail, pls. check test result and test log!!!")
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index b06d021b1..5d3ef92a2 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -153,8 +153,7 @@ def package_qa_handle_error(error_class, error_msg, name, path, d):
if fatal:
bb.error("QA Issue: %s" % error_msg)
else:
- # Use bb.warn here when it works
- bb.note("QA Issue: %s" % error_msg)
+ bb.warn("QA Issue: %s" % error_msg)
package_qa_write_error(error_class, name, path, d)
return not fatal
@@ -348,8 +347,8 @@ def package_qa_check_license(workdir, d):
if not lic_files:
# just throw a warning now. Once licensing data in entered for enough of the recipes,
# this will be converted into error and False will be returned.
- bb.warn(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
- return True
+ bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
+ return False
srcdir = bb.data.getVar('S', d, True)
@@ -357,10 +356,10 @@ def package_qa_check_license(workdir, d):
(type, host, path, user, pswd, parm) = bb.decodeurl(url)
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
- raise bb.build.FuncFailed( "LIC_FILES_CHKSUM points to invalid file: " + path)
+ raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to invalid file: " + path)
if 'md5' not in parm:
- bb.error("md5 checksum is not specified for ", url)
+ bb.error(pn + ": md5 checksum is not specified for ", url)
return False
beginline, endline = 0, 0
if 'beginline' in parm:
@@ -391,11 +390,11 @@ def package_qa_check_license(workdir, d):
os.unlink(tmplicfile)
if parm['md5'] == md5chksum:
- bb.note ("md5 checksum matched for ", url)
+ bb.note (pn + ": md5 checksum matched for ", url)
else:
- bb.error ("md5 data is not matching for ", url)
- bb.note ("The new md5 checksum is ", md5chksum)
- bb.note ("Check if the license information has changed, and if it has update the .bb file with correct license")
+ bb.error (pn + ": md5 data is not matching for ", url)
+ bb.error (pn + ": The new md5 checksum is ", md5chksum)
+ bb.error (pn + ": Check if the license information has changed in")
sane = False
return sane
@@ -428,11 +427,6 @@ def package_qa_check_staged(path,d):
path = os.path.join(root,file)
if file.endswith(".la"):
file_content = open(path).read()
- # Don't check installed status for native/cross packages
- if not bb.data.inherits_class("native", d) and not bb.data.inherits_class("cross", d):
- if installed in file_content:
- error_msg = "%s failed sanity test (installed) in path %s" % (file,root)
- sane = package_qa_handle_error(5, error_msg, "staging", path, d)
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
sane = package_qa_handle_error(8, error_msg, "staging", path, d)
@@ -534,9 +528,9 @@ python do_package_qa () {
checks = [package_qa_check_rpath, package_qa_check_dev,
package_qa_check_perm, package_qa_check_arch,
- package_qa_check_desktop,
+ package_qa_check_desktop, package_qa_hash_style,
package_qa_check_dbg]
- # package_qa_check_buildpaths, package_qa_hash_style
+ # package_qa_check_buildpaths,
walk_sane = True
rdepends_sane = True
for package in packages.split():
@@ -581,7 +575,7 @@ python do_qa_configure() {
os.path.join(root,"config.log")
if "config.log" in files:
if os.system(statement) == 0:
- bb.fatal("""This autoconf log indicates errors, it looked at host includes.
+ bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
Rerun configure task after fixing this. The path was '%s'""" % root)
if "configure.ac" in files:
@@ -605,5 +599,5 @@ Rerun configure task after fixing this. The path was '%s'""" % root)
Missing inherit gettext?""" % config)
if not package_qa_check_license(workdir, d):
- bb.fatal("Licensing warning: LIC_FILES_CHKSUM does not match, please fix")
+ bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
}
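
The license checks above operate on LIC_FILES_CHKSUM entries of the following shape (a hypothetical recipe snippet; the md5 values are placeholders, not real checksums):

    LIC_FILES_CHKSUM = "file://COPYING;md5=<md5-of-whole-file> \
                        file://src/main.c;beginline=3;endline=25;md5=<md5-of-that-region>"

The optional beginline/endline parameters restrict the region that is checksummed, matching the parm handling in package_qa_check_license() above.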
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
index 8e820122e..15802fabd 100644
--- a/meta/classes/kernel-yocto.bbclass
+++ b/meta/classes/kernel-yocto.bbclass
@@ -1,60 +1,15 @@
S = "${WORKDIR}/linux"
-# Determine which branch to fetch and build. Not all branches are in the
-# upstream repo (but will be locally created after the fetchers run) so
-# a fallback branch needs to be chosen.
-#
-# The default machine 'UNDEFINED'. If the machine is not set to a specific
-# branch in this recipe or in a recipe extension, then we fallback to a
-# branch that is always present 'standard'. This sets the KBRANCH variable
-# and is used in the SRC_URI. The machine is then set back to ${MACHINE},
-# since futher processing will use that to create local branches
-python __anonymous () {
- import bb, re
-
- version = bb.data.getVar("LINUX_VERSION", d, 1)
- # 2.6.34 signifies the old-style tree, so we need some temporary
- # conditional processing based on the kernel version.
- if version == "2.6.34":
- bb.data.setVar("KBRANCH", "${KMACHINE}-${LINUX_KERNEL_TYPE}", d)
- bb.data.setVar("KMETA", "wrs_meta", d)
- mach = bb.data.getVar("KMACHINE", d, 1)
- if mach == "UNDEFINED":
- bb.data.setVar("KBRANCH", "standard", d)
- bb.data.setVar("KMACHINE", "${MACHINE}", d)
- # track the global configuration on a bootstrapped BSP
- bb.data.setVar("SRCREV_machine", "${SRCREV_meta}", d)
- bb.data.setVar("BOOTSTRAP", "t", d)
- else:
- # The branch for a build is:
- # yocto/<kernel type>/${KMACHINE} or
- # yocto/<kernel type>/${KMACHINE}/base
- bb.data.setVar("KBRANCH", bb.data.expand("yocto/${LINUX_KERNEL_TYPE}/${KMACHINE}",d), d)
- bb.data.setVar("KMETA", "meta", d)
-
- mach = bb.data.getVar("KMACHINE", d, 1)
- # drop the "/base" if it was on the KMACHINE
- kmachine = mach.replace('/base','')
- # and then write KMACHINE back
- bb.data.setVar('KMACHINE_' + bb.data.expand("${MACHINE}",d), kmachine, d)
-
- if mach == "UNDEFINED":
- bb.data.setVar("KBRANCH", "yocto/standard/base", d)
- bb.data.setVar('KMACHINE_' + bb.data.expand("${MACHINE}",d), bb.data.expand("${MACHINE}",d), d)
- bb.data.setVar("SRCREV_machine", "standard", d)
- bb.data.setVar("BOOTSTRAP", "t", d)
-}
-
do_patch() {
cd ${S}
if [ -f ${WORKDIR}/defconfig ]; then
defconfig=${WORKDIR}/defconfig
fi
- if [ -n "${BOOTSTRAP}" ]; then
- kbranch="yocto/${LINUX_KERNEL_TYPE}/${KMACHINE}"
- else
- kbranch=${KBRANCH}
+ kbranch=${KBRANCH}
+ if [ -n "${YOCTO_KERNEL_EXTERNAL_BRANCH}" ]; then
+ # switch from a generic to a specific branch
+ kbranch=${YOCTO_KERNEL_EXTERNAL_BRANCH}
fi
# simply ensures that a branch of the right name has been created
@@ -68,7 +23,7 @@ do_patch() {
if [ -n "${KERNEL_FEATURES}" ]; then
addon_features="--features ${KERNEL_FEATURES}"
fi
- updateme ${addon_features} ${ARCH} ${WORKDIR}
+ updateme ${addon_features} ${ARCH} ${MACHINE} ${WORKDIR}
if [ $? -ne 0 ]; then
echo "ERROR. Could not update ${kbranch}"
exit 1
@@ -83,11 +38,11 @@ do_patch() {
}
do_kernel_checkout() {
- if [ -d ${WORKDIR}/.git/refs/remotes/origin ]; then
+ if [ -d ${WORKDIR}/git/.git/refs/remotes/origin ]; then
echo "Fixing up git directory for ${LINUX_KERNEL_TYPE}/${KMACHINE}"
rm -rf ${S}
mkdir ${S}
- mv ${WORKDIR}/.git ${S}
+ mv ${WORKDIR}/git/.git ${S}
if [ -e ${S}/.git/packed-refs ]; then
cd ${S}
@@ -119,23 +74,34 @@ addtask kernel_checkout before do_patch after do_unpack
do_kernel_configme() {
echo "[INFO] doing kernel configme"
+ kbranch=${KBRANCH}
+ if [ -n "${YOCTO_KERNEL_EXTERNAL_BRANCH}" ]; then
+ # switch from a generic to a specific branch
+ kbranch=${YOCTO_KERNEL_EXTERNAL_BRANCH}
+ fi
+
cd ${S}
- configme --reconfig
+ configme --reconfig --output ${B} ${kbranch} ${MACHINE}
if [ $? -ne 0 ]; then
echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
exit 1
fi
-
+
echo "# Global settings from linux recipe" >> ${B}/.config
echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
}
-do_kernel_configcheck() {
- echo "[INFO] validating kernel configuration"
- cd ${B}/..
- kconf_check ${B}/.config ${B} ${S} ${B} ${LINUX_VERSION} ${KMACHINE}-${LINUX_KERNEL_TYPE}
-}
+python do_kernel_configcheck() {
+ import bb, re, string, sys, commands
+ bb.plain("NOTE: validating kernel configuration")
+
+ pathprefix = "export PATH=%s; " % bb.data.getVar('PATH', d, True)
+ cmd = bb.data.expand("cd ${B}/..; kconf_check -${LINUX_KERNEL_TYPE}-config-${LINUX_VERSION} ${B} ${S} ${B} ${KBRANCH}",d )
+ ret, result = commands.getstatusoutput("%s%s" % (pathprefix, cmd))
+
+ bb.plain( "%s" % result )
+}
# Ensure that the branches (BSP and meta) are on the locations specified by
# their SRCREV values. If they are NOT on the right commits, the branches
@@ -148,37 +114,37 @@ do_validate_branches() {
target_meta_head="${SRCREV_meta}"
# nothing to do if bootstrapping
- if [ -n "${BOOTSTRAP}" ]; then
+ if [ -n "${YOCTO_KERNEL_EXTERNAL_BRANCH}" ]; then
return
fi
current=`git branch |grep \*|sed 's/^\* //'`
if [ -n "$target_branch_head" ] && [ "$branch_head" != "$target_branch_head" ]; then
if [ -n "${KERNEL_REVISION_CHECKING}" ]; then
- git show ${target_branch_head} > /dev/null 2>&1
- if [ $? -eq 0 ]; then
- echo "Forcing branch $current to ${target_branch_head}"
- git branch -m $current $current-orig
- git checkout -b $current ${target_branch_head}
- else
+ ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
+ if [ "$ref" = "fatal: bad object ${target_branch_head}" ]; then
echo "ERROR ${target_branch_head} is not a valid commit ID."
echo "The kernel source tree may be out of sync"
exit 1
- fi
+ else
+ echo "Forcing branch $current to ${target_branch_head}"
+ git branch -m $current $current-orig
+ git checkout -b $current ${target_branch_head}
+ fi
fi
fi
if [ "$meta_head" != "$target_meta_head" ]; then
if [ -n "${KERNEL_REVISION_CHECKING}" ]; then
- git show ${target_meta_head} > /dev/null 2>&1
- if [ $? -eq 0 ]; then
- echo "Forcing branch meta to ${target_meta_head}"
- git branch -m ${KMETA} ${KMETA}-orig
- git checkout -b ${KMETA} ${target_meta_head}
- else
+ ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
+ if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
echo "ERROR ${target_meta_head} is not a valid commit ID"
echo "The kernel source tree may be out of sync"
exit 1
+ else
+ echo "Forcing branch meta to ${target_meta_head}"
+ git branch -m ${KMETA} ${KMETA}-orig
+ git checkout -b ${KMETA} ${target_meta_head}
fi
fi
fi
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index b2f8132ab..59de148cc 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -40,8 +40,8 @@ HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
TARGET_LD_KERNEL_ARCH ?= ""
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
-KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
+KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
# Where built kernel lies in the kernel tree
KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
@@ -173,7 +173,7 @@ kernel_do_install() {
scripts/ihex2fw scripts/kallsyms scripts/pnmtologo scripts/basic/docproc \
scripts/basic/fixdep scripts/basic/hash scripts/dtc/dtc \
scripts/genksyms/genksyms scripts/kconfig/conf scripts/mod/mk_elfconfig \
- scripts/mod/modpost"
+ scripts/mod/modpost scripts/recordmcount"
rm -rf $kerneldir/scripts/*.o
rm -rf $kerneldir/scripts/basic/*.o
rm -rf $kerneldir/scripts/kconfig/*.o
@@ -185,7 +185,7 @@ kernel_do_install() {
}
sysroot_stage_all_append() {
- sysroot_stage_dir ${D}/kernel ${SYSROOT_DESTDIR}${STAGING_KERNEL_DIR}
+ sysroot_stage_dir ${D}/kernel ${SYSROOT_DESTDIR}/kernel
}
@@ -292,13 +292,16 @@ module_conf_rfcomm = "alias bt-proto-3 rfcomm"
python populate_packages_prepend () {
def extract_modinfo(file):
- import re
- tmpfile = os.tmpnam()
+ import tempfile, re
+ tempfile.tempdir = bb.data.getVar("WORKDIR", d, 1)
+ tf = tempfile.mkstemp()
+ tmpfile = tf[1]
cmd = "PATH=\"%s\" %sobjcopy -j .modinfo -O binary %s %s" % (bb.data.getVar("PATH", d, 1), bb.data.getVar("HOST_PREFIX", d, 1) or "", file, tmpfile)
os.system(cmd)
f = open(tmpfile)
l = f.read().split("\000")
f.close()
+ os.close(tf[0])
os.unlink(tmpfile)
exp = re.compile("([^=]+)=(.*)")
vals = {}
@@ -441,7 +444,7 @@ python populate_packages_prepend () {
metapkg = "kernel-modules"
bb.data.setVar('ALLOW_EMPTY_' + metapkg, "1", d)
bb.data.setVar('FILES_' + metapkg, "", d)
- blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux' ]
+ blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux', 'perf' ]
for l in module_deps.values():
for i in l:
pkg = module_pattern % legitimize_package_name(re.match(module_regex, os.path.basename(i)).group(1))
@@ -484,16 +487,16 @@ kernel_do_deploy() {
if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
if test -e arch/${ARCH}/boot/uImage ; then
- cp arch/${ARCH}/boot/uImage ${DEPLOYDIR}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
+ cp arch/${ARCH}/boot/uImage ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
elif test -e arch/${ARCH}/boot/compressed/vmlinux ; then
${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
- uboot-mkimage -A ${ARCH} -O linux -T kernel -C none -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${DEPLOYDIR}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
+ uboot-mkimage -A ${ARCH} -O linux -T kernel -C none -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
rm -f linux.bin
else
${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
rm -f linux.bin.gz
gzip -9 linux.bin
- uboot-mkimage -A ${ARCH} -O linux -T kernel -C gzip -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz ${DEPLOYDIR}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
+ uboot-mkimage -A ${ARCH} -O linux -T kernel -C gzip -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
rm -f linux.bin.gz
fi
fi
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
index 4709b33c0..733f26b85 100644
--- a/meta/classes/libc-package.bbclass
+++ b/meta/classes/libc-package.bbclass
@@ -95,9 +95,6 @@ do_install() {
mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
done
rm -f ${D}{sysconfdir}/rpc
- rm -f ${D}${includedir}/scsi/sg.h
- rm -f ${D}${includedir}/scsi/scsi_ioctl.h
- rm -f ${D}${includedir}/scsi/scsi.h
rm -rf ${D}${datadir}/zoneinfo
rm -rf ${D}${libexecdir}/getconf
}
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
new file mode 100644
index 000000000..97d4e4e41
--- /dev/null
+++ b/meta/classes/license.bbclass
@@ -0,0 +1,103 @@
+# Populates LICENSE_DIRECTORY as set in poky.conf with the license files as set by
+# LIC_FILES_CHKSUM.
+# TODO:
+# - We should also enable the ability to put the generated license directory onto the
+# rootfs
+# - Gather up more generic licenses
+# - There is a real issue revolving around license naming standards. See the license names
+# in licenses.conf and compare them to the license names in the recipes. You'll see some
+# differences, and those should be corrected.
+
+LICENSE_DIRECTORY ??= "${DEPLOY_DIR_IMAGE}/licenses"
+LICSSTATEDIR = "${WORKDIR}/license-destdir/"
+
+addtask populate_lic after do_patch before do_package
+do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
+python do_populate_lic() {
+ """
+ Populate LICENSE_DIRECTORY with licenses.
+ """
+ import os
+ import bb
+ import shutil
+
+ # All the license types for the package
+ license_types = bb.data.getVar('LICENSE', d, True)
+ # All the license files for the package
+ lic_files = bb.data.getVar('LIC_FILES_CHKSUM', d, True)
+ pn = bb.data.getVar('PN', d, True)
+ # The base directory we wrangle licenses to
+ destdir = os.path.join(bb.data.getVar('LICSSTATEDIR', d, True), pn)
+ # The license files are located in S/LIC_FILE_CHECKSUM.
+ srcdir = bb.data.getVar('S', d, True)
+ # Directory we store the generic licenses as set in poky.conf
+ generic_directory = bb.data.getVar('COMMON_LICENSE_DIR', d, True)
+ if not generic_directory:
+ raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
+
+ if not lic_files:
+ # No recipe should have an invalid license file. This is checked
+ # elsewhere, but let's be pedantic
+ bb.note(pn + ": Recipe file does not have license file information.")
+ return True
+
+ for url in lic_files.split():
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ # We want the license file to be copied into the destination
+ srclicfile = os.path.join(srcdir, path)
+ ret = bb.copyfile(srclicfile, os.path.join(destdir, os.path.basename(path)))
+ # If the copy didn't occur, something horrible went wrong and we fail out
+ if ret is False or ret == 0:
+ bb.warn("%s could not be copied for some reason. It may not exist. WARN for now." % srclicfile)
+
+ # This takes some explaining.... we are now going to try to symlink
+ # to a generic file. But, with the way LICENSE works, a package can have multiple
+ # licenses. Some of them are, for example, GPLv2+, which means it can use that version
+ # of GPLv2 specified in its license, or a later version of GPLv2. For the purposes of
+ # what we're doing here, we really don't track license revisions (although we may want to)
+ # So, we strip out the + and link to a generic GPLv2
+ #
+ # That said, there are some entries into LICENSE that either have no generic (bzip, zlib, ICS)
+ # or the LICENSE is messy (Apache 2.0 .... when they mean Apache-2.0). This should be corrected
+ # but it's outside of scope for this.
+ #
+ # Also, you get some clever license fields with logic in the field.
+ # I'm sure someone has written a logic parser for these fields, but if so, I don't know where it is.
+ # So what I do is just link to every license mentioned in the license field.
+
+ for license_type in license_types.replace('&', '').replace('+', '').replace('&', '').replace('|', '').replace('(', '').replace(')', '').split():
+ if os.path.isfile(os.path.join(generic_directory, license_type)):
+ gen_lic_dest = os.path.join(bb.data.getVar('LICENSE_DIRECTORY', d, True), "common-licenses")
+ try:
+ bb.mkdirhier(gen_lic_dest)
+ except:
+ pass
+
+ try:
+ bb.copyfile(os.path.join(generic_directory, license_type), os.path.join(gen_lic_dest, license_type))
+ except:
+ bb.warn("%s: No generic license file exists for: %s at %s" % (pn, license_type, generic_directory))
+ pass
+ try:
+ os.symlink(os.path.join(gen_lic_dest, license_type), os.path.join(destdir, "generic_" + license_type))
+ except:
+ bb.warn("%s: No generic license file exists for: %s at %s" % (pn, license_type, generic_directory))
+ pass
+ else:
+ bb.warn("%s: Something went wrong with copying: %s to %s" % (pn, license_type, generic_directory))
+ bb.warn("This could be either because we do not have a generic for this license or the LICENSE field is incorrect")
+ pass
+}
+
+SSTATETASKS += "do_populate_lic"
+do_populate_lic[sstate-name] = "populate-lic"
+do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
+do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
+
+python do_populate_lic_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_lic_setscene
+
+
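
The operator-stripping in do_populate_lic above reduces a LICENSE expression to bare license names before looking for generic license files; a small illustration with a hypothetical LICENSE value:

    license_types = "GPLv2+ & (MPL-1.1 | LGPLv2.1)"
    tokens = license_types.replace('&', '').replace('+', '') \
                          .replace('|', '').replace('(', '').replace(')', '').split()
    # tokens == ['GPLv2', 'MPL-1.1', 'LGPLv2.1']
    # each token is then looked up as a file in COMMON_LICENSE_DIR

This is also why GPLv2+ ends up linked to the generic GPLv2 text: license revisions beyond the named version are deliberately not tracked here.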
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 3ca9d6284..545a73eb8 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -60,6 +60,9 @@ base_prefix = "${STAGING_DIR_NATIVE}"
prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}"
+
# Since we actually install these into situ there is no staging prefix
STAGING_DIR_HOST = ""
STAGING_DIR_TARGET = ""
@@ -117,3 +120,5 @@ do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
do_package_write_rpm[noexec] = "1"
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
index 154bd827b..587a907c4 100644
--- a/meta/classes/nativesdk.bbclass
+++ b/meta/classes/nativesdk.bbclass
@@ -19,6 +19,8 @@ python () {
#STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-nativesdk"
#STAGING_DIR_TARGET = "${STAGING_DIR}/${BASEPKG_TARGET_SYS}-nativesdk"
+STAGING_DIR_HOST = "${STAGING_DIR}/${BASEPKG_HOST_SYS}"
+STAGING_DIR_TARGET = "${STAGING_DIR}/${BASEPKG_TARGET_SYS}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
@@ -79,4 +81,5 @@ python __anonymous () {
bb.data.setVar("OVERRIDES", bb.data.getVar("OVERRIDES", d, False) + ":virtclass-nativesdk", d)
}
-
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index d39c694de..8f58ad03f 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -289,6 +289,8 @@ python package_do_split_locales() {
if mainpkg.find('-dev'):
mainpkg = mainpkg.replace('-dev', '')
+ summary = bb.data.getVar('SUMMARY', d, True) or pn
+ description = bb.data.getVar('DESCRIPTION', d, True) or ""
for l in locales:
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
@@ -296,7 +298,8 @@ python package_do_split_locales() {
bb.data.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l), d)
bb.data.setVar('RDEPENDS_' + pkg, '%s virtual-locale-%s' % (mainpkg, ln), d)
bb.data.setVar('RPROVIDES_' + pkg, '%s-locale %s-translation' % (pn, ln), d)
- bb.data.setVar('DESCRIPTION_' + pkg, '%s translation for %s' % (l, pn), d)
+ bb.data.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l), d)
+ bb.data.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l), d)
bb.data.setVar('PACKAGES', ' '.join(packages), d)
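To make the effect concrete: for a hypothetical recipe foo with SUMMARY "Foo utility" and an en_GB locale, the loop above produces roughly:

    SUMMARY_foo-locale-en-gb = "Foo utility - en_GB translations"
    RDEPENDS_foo-locale-en-gb = "foo virtual-locale-en-gb"
    RPROVIDES_foo-locale-en-gb = "foo-locale en-gb-translation"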
@@ -497,7 +500,8 @@ python emit_pkgdata() {
pkgdest = bb.data.getVar('PKGDEST', d, 1)
pkgdatadir = bb.data.getVar('PKGDESTWORK', d, True)
- lf = bb.utils.lockfile(bb.data.expand("${PACKAGELOCK}", d))
+ # Take shared lock since we're only reading, not writing
+ lf = bb.utils.lockfile(bb.data.expand("${PACKAGELOCK}", d), True)
data_file = pkgdatadir + bb.data.expand("/${PN}" , d)
f = open(data_file, 'w')
@@ -514,6 +518,7 @@ python emit_pkgdata() {
write_if_exists(sf, pkg, 'PV')
write_if_exists(sf, pkg, 'PR')
write_if_exists(sf, pkg, 'DESCRIPTION')
+ write_if_exists(sf, pkg, 'SUMMARY')
write_if_exists(sf, pkg, 'RDEPENDS')
write_if_exists(sf, pkg, 'RPROVIDES')
write_if_exists(sf, pkg, 'RRECOMMENDS')
@@ -558,7 +563,7 @@ if [ x"$D" = "x" ]; then
fi
}
-RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/rpmdeps"
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps"
# Collect perfile run-time dependency metadata
# Output:
@@ -606,7 +611,7 @@ python package_do_filedeps() {
# Determine dependencies
for pkg in packages.split():
- if pkg.endswith('-dbg'):
+ if pkg.endswith('-dbg') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
continue
# Process provides
@@ -649,6 +654,7 @@ python package_do_shlibs() {
shlibs_dir = bb.data.getVar('SHLIBSDIR', d, True)
shlibswork_dir = bb.data.getVar('SHLIBSWORKDIR', d, True)
+ # Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(bb.data.expand("${PACKAGELOCK}", d))
def linux_so(root, path, file):
@@ -878,6 +884,7 @@ python package_do_pkgconfig () {
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
+ # Take shared lock since we're only reading, not writing
lf = bb.utils.lockfile(bb.data.expand("${PACKAGELOCK}", d))
for pkg in packages.split():
@@ -1038,6 +1045,8 @@ python package_depchains() {
for suffix in pkgs:
for pkg in pkgs[suffix]:
+ if bb.data.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', d):
+ continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
@@ -1062,7 +1071,7 @@ PACKAGEFUNCS ?= "perform_packagecopy \
package_depchains \
emit_pkgdata"
-python package_do_package () {
+python do_package () {
packages = (bb.data.getVar('PACKAGES', d, True) or "").split()
if len(packages) < 1:
bb.debug(1, "No packages to build, skipping do_package")
@@ -1091,6 +1100,7 @@ do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST}"
do_package[sstate-inputdirs] = "${PKGDESTWORK} ${SHLIBSWORKDIR}"
do_package[sstate-outputdirs] = "${PKGDATA_DIR} ${SHLIBSDIR}"
do_package[sstate-lockfile] = "${PACKAGELOCK}"
+do_package[stamp-extra-info] = "${MACHINE}"
do_package_setscene[dirs] = "${STAGING_DIR}"
python do_package_setscene () {
@@ -1106,8 +1116,6 @@ do_package_write[noexec] = "1"
do_build[recrdeptask] += "do_package_write"
addtask package_write before do_build after do_package
-EXPORT_FUNCTIONS do_package do_package_write
-
#
# Helper functions for the package writing classes
#
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index 792a2b3ae..52bd264ea 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -67,6 +67,151 @@ python do_package_deb_install () {
os.putenv('PATH', path)
}
+#
+# Update the Packages index files in ${DEPLOY_DIR_DEB}
+#
+package_update_index_deb () {
+
+ local debarchs=""
+
+ if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+ return
+ fi
+
+ for arch in ${PACKAGE_ARCHS}; do
+ sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
+ if [ -e ${DEPLOY_DIR_DEB}/$arch ]; then
+ debarchs="$debarchs $arch"
+ fi
+ if [ -e ${DEPLOY_DIR_DEB}/$sdkarch-nativesdk ]; then
+ debarchs="$debarchs $sdkarch-nativesdk"
+ fi
+ done
+
+ for arch in $debarchs; do
+ if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+ continue;
+ fi
+ cd ${DEPLOY_DIR_DEB}/$arch
+ dpkg-scanpackages . | bzip2 > Packages.bz2
+ echo "Label: $arch" > Release
+ done
+}
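After this runs, each populated architecture directory under ${DEPLOY_DIR_DEB} carries a Packages.bz2 index and a Release file, which package_install_internal_deb below turns into apt configuration. For a single hypothetical arch armv5te (the first arch gets priority 801), the generated entries would look roughly like:

    # sources.list
    deb file:${DEPLOY_DIR_DEB}/armv5te/ ./
    # preferences
    Package: *
    Pin: release l=armv5te
    Pin-Priority: 801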
+
+#
+# install a bunch of packages using apt
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_DEB - install root dir
+# INSTALL_BASEARCH_DEB - install base architecture
+# INSTALL_ARCHS_DEB - list of available archs
+# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages we only attempt to install (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_DEB - additional language/locale packages (skipped on uclibc)
+# INSTALL_TASK_DEB - task name
+
+package_install_internal_deb () {
+
+ local target_rootfs="${INSTALL_ROOTFS_DEB}"
+ local dpkg_arch="${INSTALL_BASEARCH_DEB}"
+ local archs="${INSTALL_ARCHS_DEB}"
+ local package_to_install="${INSTALL_PACKAGES_NORMAL_DEB}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_DEB}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_DEB}"
+ local task="${INSTALL_TASK_DEB}"
+
+ rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
+ rm -f ${STAGING_ETCDIR_NATIVE}/apt/preferences
+
+ priority=1
+ for arch in $archs; do
+ if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+ continue;
+ fi
+
+ echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
+ (echo "Package: *"
+ echo "Pin: release l=$arch"
+ echo "Pin-Priority: $(expr 800 + $priority)"
+ echo) >> ${STAGING_ETCDIR_NATIVE}/apt/preferences
+ priority=$(expr $priority + 5)
+ done
+
+ tac ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev > ${STAGING_ETCDIR_NATIVE}/apt/sources.list
+
+ cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
+ | sed -e "s#Architecture \".*\";#Architecture \"${dpkg_arch}\";#" \
+ | sed -e "s:#ROOTFS#:${target_rootfs}:g" \
+ > "${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
+
+ export APT_CONFIG="${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
+
+ mkdir -p ${target_rootfs}/var/dpkg/info
+ mkdir -p ${target_rootfs}/var/dpkg/updates
+
+ > ${target_rootfs}/var/dpkg/status
+ > ${target_rootfs}/var/dpkg/available
+
+ apt-get update
+
+ # Uclibc builds don't provide this stuff..
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_lingusa}" ]; then
+ apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ for i in ${package_linguas}; do
+ apt-get install $i --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ done
+ fi
+ fi
+
+ # normal install
+ for i in ${package_to_install}; do
+ apt-get install $i --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ done
+
+ rm -f ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID}
+ if [ ! -z "${package_attemptonly}" ]; then
+ for i in ${package_attemptonly}; do
+ apt-get install $i --force-yes --allow-unauthenticated >> ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID} || true
+ done
+ fi
+
+ find ${target_rootfs} -name \*.dpkg-new | while read i; do
+ mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
+ done
+
+ # Mark all packages installed
+ sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${target_rootfs}/var/dpkg/status
+}
+
+deb_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "E:"
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
python do_package_deb () {
import re, copy
import textwrap
@@ -81,12 +226,6 @@ python do_package_deb () {
bb.error("PKGWRITEDIRDEB not defined, unable to package")
return
- dvar = bb.data.getVar('D', d, True)
- if not dvar:
- bb.error("D not defined, unable to package")
- return
- bb.mkdirhier(dvar)
-
packages = bb.data.getVar('PACKAGES', d, True)
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
@@ -101,9 +240,10 @@ python do_package_deb () {
bb.debug(1, "No packages; nothing to do")
return
+ pkgdest = bb.data.getVar('PKGDEST', d, True)
+
for pkg in packages.split():
localdata = bb.data.createCopy(d)
- pkgdest = bb.data.getVar('PKGDEST', d, True)
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 5ddd6c66e..3c2472bc1 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -62,6 +62,69 @@ python package_ipk_install () {
}
#
+# install a bunch of packages using opkg
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_IPK - install root dir
+# INSTALL_CONF_IPK - configuration file
+# INSTALL_PACKAGES_NORMAL_IPK - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_IPK - packages we only attempt to install (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_IPK - additional language/locale packages (skipped on uclibc)
+# INSTALL_TASK_IPK - task name
+
+package_install_internal_ipk() {
+
+ local target_rootfs="${INSTALL_ROOTFS_IPK}"
+ local conffile="${INSTALL_CONF_IPK}"
+ local package_to_install="${INSTALL_PACKAGES_NORMAL_IPK}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_IPK}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_IPK}"
+ local task="${INSTALL_TASK_IPK}"
+
+ mkdir -p ${target_rootfs}${localstatedir}/lib/opkg/
+
+ local ipkg_args="-f ${conffile} -o ${target_rootfs} --force-overwrite"
+
+ opkg-cl ${ipkg_args} update
+
+ # Uclibc builds don't provide this stuff...
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_lingusa}" ]; then
+ for i in ${package_lingusa}; do
+ opkg-cl ${ipkg_args} install $i
+ done
+ fi
+ fi
+
+ if [ ! -z "${package_to_install}" ]; then
+ opkg-cl ${ipkg_args} install ${package_to_install}
+ fi
+
+ if [ ! -z "${package_attemptonly}" ]; then
+ opkg-cl ${ipkg_args} install ${package_attemptonly} > "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
+ fi
+}
+
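Callers drive the function purely through the INSTALL_*_IPK environment; a minimal hypothetical invocation (the package name is illustrative):

    export INSTALL_ROOTFS_IPK="${IMAGE_ROOTFS}"
    export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
    export INSTALL_PACKAGES_NORMAL_IPK="task-poky-boot"
    export INSTALL_PACKAGES_ATTEMPTONLY_IPK=""
    export INSTALL_PACKAGES_LINGUAS_IPK=""
    export INSTALL_TASK_IPK="rootfs"
    package_install_internal_ipk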
+ipk_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "exit 1" "Collected errors" ERR Fail
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
+#
# Update the Packages index files in ${DEPLOY_DIR_IPK}
#
package_update_index_ipk () {
@@ -133,17 +196,12 @@ python do_package_ipk () {
workdir = bb.data.getVar('WORKDIR', d, True)
outdir = bb.data.getVar('PKGWRITEDIRIPK', d, True)
- dvar = bb.data.getVar('D', d, True)
tmpdir = bb.data.getVar('TMPDIR', d, True)
pkgdest = bb.data.getVar('PKGDEST', d, True)
- if not workdir or not outdir or not dvar or not tmpdir:
+ if not workdir or not outdir or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
- if not os.path.exists(dvar):
- bb.debug(1, "Nothing installed, nothing to do")
- return
-
packages = bb.data.getVar('PACKAGES', d, True)
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index fbe0626d6..cc262dc83 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -15,6 +15,8 @@ python package_rpm_install () {
bb.fatal("package_rpm_install not implemented!")
}
+RPMCONF_TARGET_BASE = "${DEPLOY_DIR_RPM}/solvedb"
+RPMCONF_HOST_BASE = "${DEPLOY_DIR_RPM}/solvedb-sdk"
#
# Update the Packages depsolver db in ${DEPLOY_DIR_RPM}
#
@@ -26,15 +28,18 @@ package_update_index_rpm () {
fi
packagedirs=""
+ packagedirs_sdk=""
for arch in $rpmarchs ; do
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
+ extension="-nativesdk"
+ if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
+ extension=""
+ fi
packagedirs="$packagedirs ${DEPLOY_DIR_RPM}/$arch"
- #packagedirs="$packagedirs ${DEPLOY_DIR_RPM}/$sdkarch-nativesdk"
+ packagedirs_sdk="$packagedirs_sdk ${DEPLOY_DIR_RPM}/$sdkarch$extension"
done
- #packagedirs="$packagedirs ${DEPLOY_DIR_RPM}/${SDK_ARCH}-${TARGET_ARCH}-canadian"
-
- cat /dev/null > ${DEPLOY_DIR_RPM}/solvedb.conf
+ cat /dev/null > ${RPMCONF_TARGET_BASE}.conf
for pkgdir in $packagedirs; do
if [ -e $pkgdir/ ]; then
echo "Generating solve db for $pkgdir..."
@@ -46,10 +51,27 @@ package_update_index_rpm () {
-D "_dbpath $pkgdir/solvedb" --justdb \
--noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
--ignoresize --nosignature --nodigest \
- -D "_dbi_tags_3 Packages:Name:Basenames:Providename:Nvra" \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync" \
+ -D "__dbi_txn create nofsync" \
+ $pkgdir/solvedb/manifest
+ echo $pkgdir/solvedb >> ${RPMCONF_TARGET_BASE}.conf
+ fi
+ done
+
+ cat /dev/null > ${RPMCONF_HOST_BASE}.conf
+ for pkgdir in $packagedirs_sdk; do
+ if [ -e $pkgdir/ ]; then
+ echo "Generating solve db for $pkgdir..."
+ rm -rf $pkgdir/solvedb
+ mkdir -p $pkgdir/solvedb
+ echo "# Dynamically generated solve manifest" >> $pkgdir/solvedb/manifest
+ find $pkgdir -maxdepth 1 -type f >> $pkgdir/solvedb/manifest
+ ${RPM} -i --replacepkgs --replacefiles --oldpackage \
+ -D "_dbpath $pkgdir/solvedb" --justdb \
+ --noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
+ --ignoresize --nosignature --nodigest \
+ -D "__dbi_txn create nofsync" \
$pkgdir/solvedb/manifest
- echo $pkgdir/solvedb >> ${DEPLOY_DIR_RPM}/solvedb.conf
+ echo $pkgdir/solvedb >> ${RPMCONF_HOST_BASE}.conf
fi
done
}
@@ -59,16 +81,223 @@ package_update_index_rpm () {
# generated depsolver db's...
#
package_generate_rpm_conf () {
- printf "_solve_dbpath " > ${DEPLOY_DIR_RPM}/solvedb.macro
+ printf "_solve_dbpath " > ${RPMCONF_TARGET_BASE}.macro
+ colon=false
+ for each in `cat ${RPMCONF_TARGET_BASE}.conf` ; do
+ if [ "$colon" == true ]; then
+ printf ":" >> ${RPMCONF_TARGET_BASE}.macro
+ fi
+ printf "%s" $each >> ${RPMCONF_TARGET_BASE}.macro
+ colon=true
+ done
+ printf "\n" >> ${RPMCONF_TARGET_BASE}.macro
+
+ printf "_solve_dbpath " > ${RPMCONF_HOST_BASE}.macro
colon=false
- for each in `cat ${DEPLOY_DIR_RPM}/solvedb.conf` ; do
+ for each in `cat ${RPMCONF_HOST_BASE}.conf` ; do
if [ "$colon" == true ]; then
- printf ":" >> ${DEPLOY_DIR_RPM}/solvedb.macro
+ printf ":" >> ${RPMCONF_HOST_BASE}.macro
fi
- printf "%s" $each >> ${DEPLOY_DIR_RPM}/solvedb.macro
+ printf "%s" $each >> ${RPMCONF_HOST_BASE}.macro
colon=true
done
- printf "\n" >> ${DEPLOY_DIR_RPM}/solvedb.macro
+ printf "\n" >> ${RPMCONF_HOST_BASE}.macro
+}
+
+rpm_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "Cannot find package" "exit 1" ERR Fail
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
+
+#
+# Resolve package names to filepaths
+# resolve_package_rpm <pkgname> <solvedb conffile>
+#
+resolve_package_rpm () {
+ local pkg="$1"
+ local conffile="$2"
+ local pkg_name=""
+ for solve in `cat ${conffile}`; do
+ pkg_name=$(${RPM} -D "_dbpath $solve" -D "__dbi_txn create nofsync" -q --yaml $pkg | grep -i 'Packageorigin' | cut -d : -f 2)
+ if [ -n "$pkg_name" ]; then
+ break;
+ fi
+ done
+ echo $pkg_name
+}
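The function echoes the Packageorigin path recorded by the first solve database that knows the package, or nothing at all; callers must treat empty output as "not found". A hypothetical use (the package name is illustrative):

    pkg_path=$(resolve_package_rpm task-poky-boot ${RPMCONF_TARGET_BASE}.conf)
    if [ -z "$pkg_path" ]; then
        echo "Unable to find package task-poky-boot!"
        exit 1
    fi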
+
+#
+# install a bunch of packages using rpm
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_RPM - install root dir
+# INSTALL_PLATFORM_RPM - main platform
+# INSTALL_PLATFORM_EXTRA_RPM - extra platforms
+# INSTALL_CONFBASE_RPM - configuration file base name
+# INSTALL_PACKAGES_NORMAL_RPM - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_RPM - packages we only attempt to install (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_RPM - additional language/locale packages (skipped on uclibc)
+# INSTALL_PROVIDENAME_RPM - content for provide name
+# INSTALL_TASK_RPM - task name
+
+package_install_internal_rpm () {
+
+ local target_rootfs="${INSTALL_ROOTFS_RPM}"
+ local platform="${INSTALL_PLATFORM_RPM}"
+ local platform_extra="${INSTALL_PLATFORM_EXTRA_RPM}"
+ local confbase="${INSTALL_CONFBASE_RPM}"
+ local package_to_install="${INSTALL_PACKAGES_NORMAL_RPM}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_RPM}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_RPM}"
+ local providename="${INSTALL_PROVIDENAME_RPM}"
+ local task="${INSTALL_TASK_RPM}"
+
+ # Setup base system configuration
+ mkdir -p ${target_rootfs}/etc/rpm/
+ echo "${platform}-unknown-linux" >${target_rootfs}/etc/rpm/platform
+ if [ ! -z "$platform_extra" ]; then
+ for pt in $platform_extra ; do
+ echo "$pt-unknown-linux" >> ${target_rootfs}/etc/rpm/platform
+ done
+ fi
+
+ # Tell RPM that the "/" directory exist and is available
+ mkdir -p ${target_rootfs}/etc/rpm/sysinfo
+ echo "/" >${target_rootfs}/etc/rpm/sysinfo/Dirnames
+ if [ ! -z "$providename" ]; then
+ >>${target_rootfs}/etc/rpm/sysinfo/Providename
+ for provide in $providename ; do
+ echo $provide >> ${target_rootfs}/etc/rpm/sysinfo/Providename
+ done
+ fi
+
+ # Setup manifest of packages to install...
+ mkdir -p ${target_rootfs}/install
+ echo "# Install manifest" > ${target_rootfs}/install/install.manifest
+
+ # Uclibc builds don't provide this stuff...
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_lingusa}" ]; then
+ for pkg in ${package_lingusa}; do
+ echo "Processing $pkg..."
+ pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg!"
+ exit 1
+ fi
+ echo $pkg_name >> ${target_rootfs}/install/install.manifest
+ done
+ fi
+ fi
+
+ if [ ! -z "${package_to_install}" ]; then
+ for pkg in ${package_to_install} ; do
+ echo "Processing $pkg..."
+ pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg!"
+ exit 1
+ fi
+ echo $pkg_name >> ${target_rootfs}/install/install.manifest
+ done
+ fi
+
+ # Generate an install solution by doing a --justdb install, then recreate it with
+ # an actual package install!
+ ${RPM} -D "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ ${target_rootfs}/install/install.manifest
+
+ if [ ! -z "${package_attemptonly}" ]; then
+ echo "Adding attempt only packages..."
+ for pkg in ${package_attemptonly} ; do
+ echo "Processing $pkg..."
+ pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg!"
+ exit 1
+ fi
+ echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}"
+ ${RPM} -D "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ $pkg_name >> "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
+ done
+ fi
+
+ #### Note: 'Recommends' is an arbitrary tag that means _SUGGESTS_ in Poky..
+ # Add any recommended packages to the image
+ # RPM does not solve for recommended packages because they are optional...
+ # So we query them and treat them like the ATTEMPTONLY packages above...
+ # Change the loop to "1" to run this code...
+ loop=0
+ if [ $loop -eq 1 ]; then
+ echo "Processing recommended packages..."
+ cat /dev/null > ${target_rootfs}/install/recommend.list
+ while [ $loop -eq 1 ]; do
+ # Dump the full set of recommends...
+ ${RPM} -D "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ -D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -qa --qf "[%{RECOMMENDS}\n]" | sort -u > ${target_rootfs}/install/recommend
+ # Did we add more to the list?
+ grep -v -x -F -f ${target_rootfs}/install/recommend.list ${target_rootfs}/install/recommend > ${target_rootfs}/install/recommend.new || true
+ # We don't want to loop unless there is a change to the list!
+ loop=0
+ cat ${target_rootfs}/install/recommend.new | \
+ while read pkg ; do
+ # Ohh there was a new one, we'll need to loop again...
+ loop=1
+ echo "Processing $pkg..."
+ pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg." >> "${WORKDIR}/temp/log.do_${task}_recommend.${PID}"
+ continue
+ fi
+ echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_{task}_recommend.${PID}"
+ ${RPM} -D "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ $pkg_name >> "${WORKDIR}/temp/log.do_${task}_recommend.${PID}" 2>&1 || true
+ done
+ cat ${target_rootfs}/install/recommend.list ${target_rootfs}/install/recommend.new | sort -u > ${target_rootfs}/install/recommend.new.list
+ mv -f ${target_rootfs}/install/recommend.new.list ${target_rootfs}/install/recommend.list
+ rm ${target_rootfs}/install/recommend ${target_rootfs}/install/recommend.new
+ done
+ fi
+
+ # Now that we have a solution, pull out a list of what to install...
+ echo "Manifest: ${target_rootfs}/install/install.manifest"
+ ${RPM} -D "_dbpath ${target_rootfs}/install" -qa --yaml \
+ -D "__dbi_txn create nofsync private" \
+ | grep -i 'Packageorigin' | cut -d : -f 2 > ${target_rootfs}/install/install_solution.manifest
+
+ # Attempt install
+ ${RPM} --root ${target_rootfs} \
+ -D "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ -D "_dbpath ${rpmlibdir}" \
+ --noscripts --notriggers --noparentdirs --nolinktos \
+ -D "__dbi_txn create nofsync private" \
+ -Uhv ${target_rootfs}/install/install_solution.manifest
}
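The control flow above boils down to a two-pass scheme; heavily simplified (most -D defines and options elided):

    # pass 1: dependency solve only, into a throwaway db - no files are unpacked
    ${RPM} -D "_dbpath ${target_rootfs}/install" -U --justdb ${target_rootfs}/install/install.manifest
    # dump the solved set as package file paths
    ${RPM} -D "_dbpath ${target_rootfs}/install" -qa --yaml | grep -i 'Packageorigin' | cut -d : -f 2
    # pass 2: install the solved set for real against the rootfs database
    ${RPM} --root ${target_rootfs} -D "_dbpath ${rpmlibdir}" -Uhv ${target_rootfs}/install/install_solution.manifest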
python write_specfile () {
@@ -193,7 +422,7 @@ python write_specfile () {
splitname = pkgname
- splitsummary = (bb.data.getVar('SUMMARY', d, True) or bb.data.getVar('DESCRIPTION', d, True) or ".")
+ splitsummary = (bb.data.getVar('SUMMARY', localdata, True) or bb.data.getVar('DESCRIPTION', localdata, True) or ".")
splitversion = (bb.data.getVar('PV', localdata, True) or "").replace('-', '+')
splitrelease = (bb.data.getVar('PR', localdata, True) or "")
splitepoch = (bb.data.getVar('PE', localdata, True) or "")
@@ -530,6 +759,8 @@ python do_package_rpm () {
cmd = cmd + " --define '_use_internal_dependency_generator 0'"
cmd = cmd + " --define '__find_requires " + outdepends + "'"
cmd = cmd + " --define '__find_provides " + outprovides + "'"
+ cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
+ cmd = cmd + " --define 'debug_package %{nil}'"
cmd = cmd + " -bb " + outspecfile
# Build the spec file!
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 7ba8abb72..80fd45f0e 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -58,9 +58,8 @@ python patch_do_patch() {
continue
if not local:
- bb.fetch.init([url],d)
url = bb.encodeurl((type, host, path, user, pswd, []))
- local = os.path.join('/', bb.fetch.localpath(url, d))
+ local = os.path.join('/', bb.fetch2.localpath(url, d))
local = bb.data.expand(local, d)
if "striplevel" in parm:
@@ -126,6 +125,7 @@ python patch_do_patch() {
raise bb.build.FuncFailed(str(sys.exc_value))
resolver.Resolve()
}
+patch_do_patch[vardepsexclude] = "DATE SRCDATE"
addtask patch after do_unpack
do_patch[dirs] = "${WORKDIR}"
diff --git a/meta/classes/poky-autobuild-notifier.bbclass b/meta/classes/poky-autobuild-notifier.bbclass
index 24b85c461..9ab3d064e 100644
--- a/meta/classes/poky-autobuild-notifier.bbclass
+++ b/meta/classes/poky-autobuild-notifier.bbclass
@@ -48,15 +48,15 @@ def do_autobuilder_failure_report(event):
addhandler poky_autobuilder_notifier_eventhandler
python poky_autobuilder_notifier_eventhandler() {
from bb import note, error, data
- from bb.event import NotHandled, getName
+ from bb.event import getName
if e.data is None:
- return NotHandled
+ return
name = getName(e)
if name == "TaskFailed":
do_autobuilder_failure_report(e)
- return NotHandled
+ return
}
diff --git a/meta/classes/poky-image.bbclass b/meta/classes/poky-image.bbclass
index 5f34ad11d..225584bcd 100644
--- a/meta/classes/poky-image.bbclass
+++ b/meta/classes/poky-image.bbclass
@@ -2,6 +2,9 @@
#
# Copyright (C) 2007 OpenedHand LTD
+LIC_FILES_CHKSUM = "file://${POKYBASE}/LICENSE;md5=3f40d7994397109285ec7b81fdeb3b58 \
+ file://${POKYBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
# IMAGE_FEATURES control content of images built with Poky.
#
# By default we install task-poky-boot and task-base packages - this gives us
@@ -79,7 +82,7 @@ POKY_BASE_INSTALL = '\
\
${@base_contains("IMAGE_FEATURES", ["nfs-server", "dev-pkgs"], "task-poky-nfs-server-dev", "",d)} \
\
- ${@base_contains("IMAGE_FEATURES", "package-management", "${ROOTFS_PKGMANAGE}", "",d)} \
+ ${@base_contains("IMAGE_FEATURES", "package-management", "${ROOTFS_PKGMANAGE}", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)} \
${@base_contains("IMAGE_FEATURES", "qt4-pkgs", "task-poky-qt-demos", "",d)} \
${POKY_EXTRA_INSTALL} \
'
diff --git a/meta/classes/poky.bbclass b/meta/classes/poky.bbclass
deleted file mode 100644
index 7d3c28ffe..000000000
--- a/meta/classes/poky.bbclass
+++ /dev/null
@@ -1,29 +0,0 @@
-PREMIRRORS_append () {
-bzr://.*/.* http://pokylinux.org/sources/
-cvs://.*/.* http://pokylinux.org/sources/
-git://.*/.* http://pokylinux.org/sources/
-hg://.*/.* http://pokylinux.org/sources/
-osc://.*/.* http://pokylinux.org/sources/
-p4://.*/.* http://pokylinux.org/sources/
-svk://.*/.* http://pokylinux.org/sources/
-svn://.*/.* http://pokylinux.org/sources/
-
-bzr://.*/.* http://autobuilder.pokylinux.org/sources/
-cvs://.*/.* http://autobuilder.pokylinux.org/sources/
-git://.*/.* http://autobuilder.pokylinux.org/sources/
-hg://.*/.* http://autobuilder.pokylinux.org/sources/
-osc://.*/.* http://autobuilder.pokylinux.org/sources/
-p4://.*/.* http://autobuilder.pokylinux.org/sources/
-svk://.*/.* http://autobuilder.pokylinux.org/sources/
-svn://.*/.* http://autobuilder.pokylinux.org/sources/
-}
-
-MIRRORS_append () {
-ftp://.*/.* http://pokylinux.org/sources/
-http://.*/.* http://pokylinux.org/sources/
-https://.*/.* http://pokylinux.org/sources/
-
-ftp://.*/.* http://autobuilder.pokylinux.org/sources/
-http://.*/.* http://autobuilder.pokylinux.org/sources/
-https://.*/.* http://autobuilder.pokylinux.org/sources/
-}
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
new file mode 100644
index 000000000..7e260ef38
--- /dev/null
+++ b/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,82 @@
+inherit meta toolchain-scripts
+inherit populate_sdk_${IMAGE_PKGTYPE}
+
+SDK_DIR = "${WORKDIR}/sdk"
+SDK_OUTPUT = "${SDK_DIR}/image"
+SDK_DEPLOY = "${TMPDIR}/deploy/sdk"
+
+SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${TARGET_SYS}"
+
+TOOLCHAIN_HOST_TASK ?= "task-sdk-host task-cross-canadian-${TRANSLATED_TARGET_ARCH}"
+TOOLCHAIN_TARGET_TASK ?= "task-poky-standalone-sdk-target task-poky-standalone-sdk-target-dbg"
+TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${DISTRO_VERSION}"
+
+RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
+DEPENDS = "virtual/fakeroot-native sed-native"
+
+PID = "${@os.getpid()}"
+
+EXCLUDE_FROM_WORLD = "1"
+
+fakeroot do_populate_sdk() {
+ rm -rf ${SDK_OUTPUT}
+ mkdir -p ${SDK_OUTPUT}
+
+ populate_sdk_${IMAGE_PKGTYPE}
+
+ # Don't ship any libGL in the SDK
+ rm -rf ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/libGL*
+
+ # Can copy pstage files here
+ # target_pkgs=`cat ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/var/lib/opkg/status | grep Package: | cut -f 2 -d ' '`
+
+ # Fix or remove broken .la files
+ #rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}/lib/*.la
+ rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/*.la
+
+ # Link the ld.so.cache file into the host's filesystem
+ ln -s /etc/ld.so.cache ${SDK_OUTPUT}/${SDKPATHNATIVE}/etc/ld.so.cache
+
+ # Setup site file for external use
+ siteconfig=${SDK_OUTPUT}/${SDKPATH}/site-config-${MULTIMACH_TARGET_SYS}
+ touch $siteconfig
+ for sitefile in ${CONFIG_SITE} ; do
+ cat $sitefile >> $siteconfig
+ done
+
+ toolchain_create_sdk_env_script
+
+ # Add version information
+ versionfile=${SDK_OUTPUT}/${SDKPATH}/version-${MULTIMACH_TARGET_SYS}
+ touch $versionfile
+ echo 'Distro: ${DISTRO}' >> $versionfile
+ echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
+ echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
+ echo 'Timestamp: ${DATETIME}' >> $versionfile
+
+ # Package it up
+ mkdir -p ${SDK_DEPLOY}
+ cd ${SDK_OUTPUT}
+ tar --owner=root --group=root -cj --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
+}
+
+populate_sdk_log_check() {
+ for target in $*
+ do
+ lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
+
+ echo "log_check: Using $lf_path as logfile"
+
+ if test -e "$lf_path"
+ then
+ ${IMAGE_PKGTYPE}_log_check $target $lf_path
+ echo "Logfile is clean"
+ else
+ echo "Cannot find logfile [$lf_path]"
+ fi
+ done
+}
+
+do_populate_sdk[nostamp] = "1"
+do_populate_sdk[recrdeptask] = "do_package_write"
+addtask populate_sdk before do_build after do_install
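With the task wired in as above, an SDK tarball should be producible directly from an image recipe; the image name here is illustrative:

    bitbake -c populate_sdk poky-image-sato

The result is deposited in ${SDK_DEPLOY} as ${TOOLCHAIN_OUTPUTNAME}.tar.bz2.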
diff --git a/meta/classes/populate_sdk_deb.bbclass b/meta/classes/populate_sdk_deb.bbclass
new file mode 100644
index 000000000..d563c2867
--- /dev/null
+++ b/meta/classes/populate_sdk_deb.bbclass
@@ -0,0 +1,60 @@
+do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_deb"
+
+
+populate_sdk_post_deb () {
+
+ local target_rootfs=$1
+
+ cp -r ${STAGING_ETCDIR_NATIVE}/apt ${target_rootfs}/etc
+}
+
+fakeroot populate_sdk_deb () {
+
+ package_update_index_deb
+
+ export INSTALL_TASK_DEB="populate_sdk"
+ export INSTALL_PACKAGES_LINGUAS_DEB=""
+ export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
+
+ #install target
+ echo "Installing TARGET packages"
+ export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
+ export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
+ export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_TARGET_TASK}"
+
+ package_install_internal_deb
+ populate_sdk_post_deb ${INSTALL_ROOTFS_DEB}
+
+ populate_sdk_log_check populate_sdk
+
+ #install host
+ echo "Installing HOST packages"
+ export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}"
+ export INSTALL_BASEARCH_DEB="${SDK_ARCH}"
+ export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_HOST_TASK}"
+ INSTALL_ARCHS_DEB=""
+ for arch in ${PACKAGE_ARCHS}; do
+ sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
+ extension="-nativesdk"
+ if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
+ extension=""
+ fi
+ if [ -e ${DEPLOY_DIR_DEB}/$sdkarch$extension ]; then
+ INSTALL_ARCHS_DEB="$INSTALL_ARCHS_DEB $sdkarch$extension"
+ fi
+ done
+ export INSTALL_ARCHS_DEB
+
+ package_install_internal_deb
+ populate_sdk_post_deb ${SDK_OUTPUT}/${SDKPATHNATIVE}
+
+ # move remaining files
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/dpkg
+ mv ${SDK_OUTPUT}/var/dpkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/dpkg
+ rm -rf ${SDK_OUTPUT}/var
+
+ populate_sdk_log_check populate_sdk
+}
+
diff --git a/meta/classes/populate_sdk_ipk.bbclass b/meta/classes/populate_sdk_ipk.bbclass
new file mode 100644
index 000000000..79259f80d
--- /dev/null
+++ b/meta/classes/populate_sdk_ipk.bbclass
@@ -0,0 +1,44 @@
+do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_ipk"
+
+fakeroot populate_sdk_ipk() {
+
+ rm -f ${IPKGCONF_TARGET}
+ touch ${IPKGCONF_TARGET}
+ rm -f ${IPKGCONF_SDK}
+ touch ${IPKGCONF_SDK}
+
+ package_update_index_ipk
+ package_generate_ipkg_conf
+
+ export INSTALL_PACKAGES_ATTEMPTONLY_IPK=""
+ export INSTALL_PACKAGES_LINGUAS_IPK=""
+ export INSTALL_TASK_IPK="populate_sdk"
+
+ #install target
+ export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
+ export INSTALL_PACKAGES_NORMAL_IPK="${TOOLCHAIN_TARGET_TASK}"
+
+ package_install_internal_ipk
+
+ #install host
+ export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}"
+ export INSTALL_CONF_IPK="${IPKGCONF_SDK}"
+ export INSTALL_PACKAGES_NORMAL_IPK="${TOOLCHAIN_HOST_TASK}"
+
+ package_install_internal_ipk
+
+ # post-install cleanup
+ install -d ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}
+ install -m 0644 ${IPKGCONF_TARGET} ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}/
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
+ install -m 0644 ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg
+ mv ${SDK_OUTPUT}/var/lib/opkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg/
+ rm -Rf ${SDK_OUTPUT}/var
+
+ populate_sdk_log_check populate_sdk
+}
diff --git a/meta/classes/populate_sdk_rpm.bbclass b/meta/classes/populate_sdk_rpm.bbclass
new file mode 100644
index 000000000..e1abbad17
--- /dev/null
+++ b/meta/classes/populate_sdk_rpm.bbclass
@@ -0,0 +1,80 @@
+do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_rpm"
+
+rpmlibdir = "/var/lib/rpm"
+RPMOPTS="--dbpath ${rpmlibdir} --define='_openall_before_chroot 1'"
+RPM="rpm ${RPMOPTS}"
+
+
+populate_sdk_post_rpm () {
+
+ local target_rootfs=$1
+
+ # remove lock files
+ rm -f ${target_rootfs}/__db.*
+
+ # Move manifests into the directory with the logs
+ mv ${target_rootfs}/install/*.manifest ${T}/
+
+ # Remove all remaining resolver files
+ rm -rf ${target_rootfs}/install
+}
+
+fakeroot populate_sdk_rpm () {
+
+ package_update_index_rpm
+ package_generate_rpm_conf
+
+ export INSTALL_PACKAGES_ATTEMPTONLY_RPM=""
+ export INSTALL_PACKAGES_LINGUAS_RPM=""
+ export INSTALL_PROVIDENAME_RPM="/bin/sh"
+ export INSTALL_TASK_RPM="populate_sdk"
+
+
+ #install target
+ export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_PLATFORM_RPM="${TARGET_ARCH}"
+ export INSTALL_PLATFORM_EXTRA_RPM="${PACKAGE_ARCHS}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_TARGET_BASE}"
+ export INSTALL_PACKAGES_NORMAL_RPM="${TOOLCHAIN_TARGET_TASK}"
+
+ package_install_internal_rpm
+ populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
+
+ #install host
+ export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}"
+ export INSTALL_PLATFORM_RPM="${SDK_ARCH}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_HOST_BASE}"
+ export INSTALL_PACKAGES_NORMAL_RPM="${TOOLCHAIN_HOST_TASK}"
+ INSTALL_PLATFORM_EXTRA_RPM=""
+ for arch in ${PACKAGE_ARCHS}; do
+ sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
+ extension="-nativesdk"
+ if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
+ extension=""
+ fi
+ if [ -e ${DEPLOY_DIR_RPM}/$sdkarch$extension ]; then
+ INSTALL_PLATFORM_EXTRA_RPM="$INSTALL_PLATFORM_EXTRA_RPM $sdkarch"
+ fi
+ done
+ export INSTALL_PLATFORM_EXTRA_RPM
+
+ package_install_internal_rpm
+ populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
+
+ # move host RPM library data
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm
+ mv ${SDK_OUTPUT}${rpmlibdir}/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm/
+ rm -Rf ${SDK_OUTPUT}/var
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
+ mv ${SDK_OUTPUT}/etc/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
+ rm -rf ${SDK_OUTPUT}/etc
+
+ populate_sdk_log_check populate_sdk
+
+ # Workaround so the parser knows we need the resolve_package_rpm function!
+ if false ; then
+ resolve_package_rpm foo ${RPMCONF_TARGET_BASE}.conf || true
+ fi
+}
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
new file mode 100644
index 000000000..a072a9387
--- /dev/null
+++ b/meta/classes/python-dir.bbclass
@@ -0,0 +1,2 @@
+PYTHON_DIR = "python${PYTHON_BASEVERSION}"
+PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
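Recipes can then install modules without hard-coding the interpreter version; a hypothetical do_install fragment (mymodule.py is a stand-in):

    do_install() {
        install -d ${D}${PYTHON_SITEPACKAGES_DIR}
        install -m 0644 mymodule.py ${D}${PYTHON_SITEPACKAGES_DIR}/
    }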
diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass
index e6810260f..5eebd8ec4 100644
--- a/meta/classes/qmake2.bbclass
+++ b/meta/classes/qmake2.bbclass
@@ -3,7 +3,7 @@
#
inherit qmake_base
-DEPENDS_prepend = "qt4-tools-native"
+DEPENDS_prepend = "qt4-tools-native "
export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
@@ -19,5 +19,6 @@ export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
export OE_QMAKE_LIBS_QT = "qt"
export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
+export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
new file mode 100644
index 000000000..670605ba4
--- /dev/null
+++ b/meta/classes/qt4e.bbclass
@@ -0,0 +1,18 @@
+DEPENDS_prepend = "${@["qt4-embedded ", ""][(bb.data.getVar('PN', d, 1)[:12] == 'qt4-embedded')]}"
+
+inherit qmake2
+
+QT_DIR_NAME = "qtopia"
+QT_LIBINFIX = "E"
+# override variables set by qmake-base to compile Qt/Embedded apps
+#
+export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
+export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
+export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
+export OE_QMAKE_LIBS_QT = "qt"
+export OE_QMAKE_LIBS_X11 = ""
+export OE_QMAKE_EXTRA_MODULES = "network"
+EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
+
+# Qt4 uses atomic instructions not supported in thumb mode
+ARM_INSTRUCTION_SET = "arm"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 000000000..abb1d9d2e
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,9 @@
+DEPENDS_prepend = "${@["qt4-x11-free ", ""][(bb.data.getVar('PN', d, 1)[:12] == 'qt4-x11-free')]}"
+
+inherit qmake2
+
+QT_DIR_NAME = "qt4"
+QT_LIBINFIX = ""
+
+# Qt4 uses atomic instructions not supported in thumb mode
+ARM_INSTRUCTION_SET = "arm"
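An application recipe should only need to inherit one of these classes to pick up the matching QMAKESPEC, include paths and libraries; a minimal hypothetical fragment:

    DESCRIPTION = "Example Qt4/X11 application"
    inherit qt4x11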
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 53fcda23a..7cbdb1762 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -14,22 +14,46 @@ RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
BB_DEFAULT_TASK = "rm_work_all"
do_rm_work () {
- # Ensure pseudo is no longer active
- if [ -d ${WORKDIR}/pseudo ]; then
- ${FAKEROOT} -S
- fi
cd ${WORKDIR}
for dir in *
do
if [ `basename ${S}` = $dir ]; then
rm -rf $dir
- elif [ $dir != 'temp' ]; then
+ # The package and package-split directories are retained by sstate for
+ # do_package so we retain them here too. Anything in sstate 'plaindirs'
+ # should be retained. Also retain logs and other files in temp.
+ elif [ $dir != 'temp' ] && [ $dir != 'package' ] && [ $dir != 'package-split' ]; then
rm -rf $dir
fi
done
# Need to add pseudo back or subsequent work in this workdir
# might fail since setscene may not rerun to recreate it
mkdir ${WORKDIR}/pseudo/
+
+ # Change normal stamps into setscene stamps as they better reflect the
+ # fact WORKDIR is now empty
+ cd `dirname ${STAMP}`
+ for i in `basename ${STAMP}`*
+ do
+ for j in ${SSTATETASKS}
+ do
+ case $i in
+ *do_setscene*)
+ break
+ ;;
+ *_setscene*)
+ i=dummy
+ break
+ ;;
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ i=dummy
+ break
+ ;;
+ esac
+ done
+ rm -f $i
+ done
}
addtask rm_work after do_${RMWORK_ORIG_TASK}
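The class is normally enabled globally rather than per recipe, e.g. in local.conf:

    INHERIT += "rm_work"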
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index f579b53a8..e03e80bdb 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -12,50 +12,29 @@ opkglibdir = "${localstatedir}/lib/opkg"
fakeroot rootfs_deb_do_rootfs () {
set +e
- mkdir -p ${IMAGE_ROOTFS}/var/dpkg/info
- mkdir -p ${IMAGE_ROOTFS}/var/dpkg/updates
- rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
- rm -f ${STAGING_ETCDIR_NATIVE}/apt/preferences
- > ${IMAGE_ROOTFS}/var/dpkg/status
- > ${IMAGE_ROOTFS}/var/dpkg/available
mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
- priority=1
- for arch in ${PACKAGE_ARCHS}; do
- if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
- continue;
- fi
- cd ${DEPLOY_DIR_DEB}/$arch
- # if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
- rm -f Packages.gz Packages Packages.bz2
- # fi
- dpkg-scanpackages . | bzip2 > Packages.bz2
- echo "Label: $arch" > Release
-
- echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
- (echo "Package: *"
- echo "Pin: release l=$arch"
- echo "Pin-Priority: $(expr 800 + $priority)"
- echo) >> ${STAGING_ETCDIR_NATIVE}/apt/preferences
- priority=$(expr $priority + 5)
- done
+ # update index
+ package_update_index_deb
+
+ #install packages
+ export INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
+ export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
+ export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
+ export INSTALL_PACKAGES_NORMAL_DEB="${PACKAGE_INSTALL}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_DEB="${LINGUAS_INSTALL}"
+ export INSTALL_TASK_DEB="rootfs"
- tac ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev > ${STAGING_ETCDIR_NATIVE}/apt/sources.list
+ package_install_internal_deb
- cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
- | sed -e 's#Architecture ".*";#Architecture "${DPKG_ARCH}";#' \
- | sed -e 's:#ROOTFS#:${IMAGE_ROOTFS}:g' \
- > "${STAGING_ETCDIR_NATIVE}/apt/apt-rootfs.conf"
- export APT_CONFIG="${STAGING_ETCDIR_NATIVE}/apt/apt-rootfs.conf"
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
- apt-get update
-
_flag () {
sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/dpkg/status
}
@@ -63,47 +42,6 @@ fakeroot rootfs_deb_do_rootfs () {
cat ${IMAGE_ROOTFS}/var/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
}
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- for i in ${LINGUAS_INSTALL}; do
- apt-get install $i --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- done
- fi
- fi
-
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- for i in ${PACKAGE_INSTALL}; do
- apt-get install $i --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- done
- fi
-
- rm ${WORKDIR}/temp/log.do_$target-attemptonly.${PID}
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- for i in ${PACKAGE_INSTALL_ATTEMPTONLY}; do
- apt-get install $i --force-yes --allow-unauthenticated >> ${WORKDIR}/temp/log.do_rootfs-attemptonly.${PID} || true
- done
- fi
-
- find ${IMAGE_ROOTFS} -name \*.dpkg-new | for i in `cat`; do
- mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
- done
-
- install -d ${IMAGE_ROOTFS}/${sysconfdir}
- echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
-
- # Mark all packages installed
- sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${IMAGE_ROOTFS}/var/dpkg/status
-
# Attempt to run preinsts
# Mark packages with preinst failures as unpacked
for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.preinst; do
@@ -122,6 +60,9 @@ fakeroot rootfs_deb_do_rootfs () {
set -e
+ install -d ${IMAGE_ROOTFS}/${sysconfdir}
+ echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
+
# Hacks to allow opkg's update-alternatives and opkg to coexist for now
mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
if [ -e ${IMAGE_ROOTFS}/var/dpkg/alternatives ]; then
@@ -133,27 +74,7 @@ fakeroot rootfs_deb_do_rootfs () {
${ROOTFS_POSTPROCESS_COMMAND}
- log_check rootfs
-}
-
-rootfs_deb_log_check() {
- target="$1"
- lf_path="$2"
-
- lf_txt="`cat $lf_path`"
- for keyword_die in "E:"
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- true
+ log_check rootfs
}
remove_packaging_data_files() {
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index c9edec2b5..5727d1502 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -12,7 +12,7 @@ ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk"
-IPKG_ARGS = "-f ${IPKGCONF_TARGET} -o ${IMAGE_ROOTFS}"
+IPKG_ARGS = "-f ${IPKGCONF_TARGET} -o ${IMAGE_ROOTFS} --force-overwrite"
OPKG_PREPROCESS_COMMANDS = "package_update_index_ipk; package_generate_ipkg_conf"
@@ -29,26 +29,19 @@ fakeroot rootfs_ipk_do_rootfs () {
${OPKG_PREPROCESS_COMMANDS}
mkdir -p ${T}/
- mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
- opkg-cl ${IPKG_ARGS} update
+ #install
+ export INSTALL_PACKAGES_ATTEMPTONLY_IPK="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_IPK="${LINGUAS_INSTALL}"
+ export INSTALL_TASK_IPK="rootfs"
- # Uclibc builds don't provide this stuff...
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- for i in ${LINGUAS_INSTALL}; do
- opkg-cl ${IPKG_ARGS} install $i
- done
- fi
- fi
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- opkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL}
- fi
+ export INSTALL_ROOTFS_IPK="${IMAGE_ROOTFS}"
+ export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
+ export INSTALL_PACKAGES_NORMAL_IPK="${PACKAGE_INSTALL}"
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- opkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL_ATTEMPTONLY} > "${WORKDIR}/temp/log.do_rootfs_attemptonly.${PID}" || true
- fi
+ package_install_internal_ipk
+ #post install
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
@@ -82,26 +75,6 @@ fakeroot rootfs_ipk_do_rootfs () {
log_check rootfs
}
-rootfs_ipk_log_check() {
- target="$1"
- lf_path="$2"
-
- lf_txt="`cat $lf_path`"
- for keyword_die in "exit 1" "Collected errors" ERR Fail
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- true
-}
-
rootfs_ipk_write_manifest() {
manifest=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest
cp ${IMAGE_ROOTFS}${opkglibdir}/status $manifest
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 33abe96df..6fe11f77e 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -2,14 +2,18 @@
# Creates a root filesystem out of rpm packages
#
-ROOTFS_PKGMANAGE = "rpm zypper"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+ROOTFS_PKGMANAGE = "rpm zypper"
+# Postinstalls on device are handled within this class at present
+ROOTFS_PKGMANAGE_BOOTSTRAP = ""
do_rootfs[depends] += "rpm-native:do_populate_sysroot"
# Needed for update-alternatives
do_rootfs[depends] += "opkg-native:do_populate_sysroot"
+# Creating the repo info in do_rootfs
+#do_rootfs[depends] += "createrepo-native:do_populate_sysroot"
+
do_rootfs[recrdeptask] += "do_package_write_rpm"
AWKPOSTINSTSCRIPT = "${POKYBASE}/scripts/rootfs_rpm-extract-postinst.awk"
@@ -33,122 +37,27 @@ RPM="rpm ${RPMOPTS}"
do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
fakeroot rootfs_rpm_do_rootfs () {
- set +x
+ #set +x
${RPM_PREPROCESS_COMMANDS}
+ #createrepo "${DEPLOY_DIR_RPM}"
+
# Setup base system configuration
mkdir -p ${IMAGE_ROOTFS}/etc/rpm/
- echo "${TARGET_ARCH}-linux" >${IMAGE_ROOTFS}/etc/rpm/platform
-
- # Tell RPM that the "/" directory exist and is available
- mkdir -p ${IMAGE_ROOTFS}/etc/rpm/sysinfo
- echo "/" >${IMAGE_ROOTFS}/etc/rpm/sysinfo/Dirnames
-
- # Setup manifest of packages to install...
- mkdir -p ${IMAGE_ROOTFS}/install
- echo "# Install manifest" > ${IMAGE_ROOTFS}/install/install.manifest
-
- # Uclibc builds don't provide this stuff...
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- for pkg in ${LINGUAS_INSTALL}; do
- echo "Processing $pkg..."
- pkg_name=$(resolve_package $pkg)
- if [ -z "$pkg_name" ]; then
- echo "Unable to find package $pkg!"
- exit 1
- fi
- echo $pkg_name >> ${IMAGE_ROOTFS}/install/install.manifest
- done
- fi
- fi
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- for pkg in ${PACKAGE_INSTALL} ; do
- echo "Processing $pkg..."
- pkg_name=$(resolve_package $pkg)
- if [ -z "$pkg_name" ]; then
- echo "Unable to find package $pkg!"
- exit 1
- fi
- echo $pkg_name >> ${IMAGE_ROOTFS}/install/install.manifest
- done
- fi
+ # install packages
+ export INSTALL_ROOTFS_RPM="${IMAGE_ROOTFS}"
+ export INSTALL_PLATFORM_RPM="${TARGET_ARCH}"
+ export INSTALL_PLATFORM_EXTRA_RPM="${PACKAGE_ARCHS}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_TARGET_BASE}"
+ export INSTALL_PACKAGES_NORMAL_RPM="${PACKAGE_INSTALL}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_RPM="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_RPM="${LINGUAS_INSTALL}"
+ export INSTALL_PROVIDENAME_RPM=""
+ export INSTALL_TASK_RPM="populate_sdk"
- # Generate an install solution by doing a --justdb install, then recreate it with
- # an actual package install!
- ${RPM} -D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${DEPLOY_DIR_RPM}/solvedb.macro`" \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync" \
- -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
- ${IMAGE_ROOTFS}/install/install.manifest
-
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- echo "Adding attempt only packages..."
- for pkg in ${PACKAGE_INSTALL_ATTEMPTONLY} ; do
- echo "Processing $pkg..."
- pkg_name=$(resolve_package $pkg)
- if [ -z "$pkg_name" ]; then
- echo "Unable to find package $pkg!"
- exit 1
- fi
- echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_rootfs_attemptonly.${PID}"
- ${RPM} -D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${DEPLOY_DIR_RPM}/solvedb.macro`" \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync private" \
- -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
- $pkg_name >> "${WORKDIR}/temp/log.do_rootfs_attemptonly.${PID}" || true
- done
- fi
-
-#### Note: 'Recommends' is an arbitrary tag that means _SUGGESTS_ in Poky..
- # Add any recommended packages to the image
- # RPM does not solve for recommended packages because they are optional...
- # So we query them and tree them like the ATTEMPTONLY packages above...
- # Change the loop to "1" to run this code...
- loop=0
- if [ $loop -eq 1 ]; then
- echo "Processing recommended packages..."
- cat /dev/null > ${IMAGE_ROOTFS}/install/recommend.list
- while [ $loop -eq 1 ]; do
- # Dump the full set of recommends...
- ${RPM} -D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${DEPLOY_DIR_RPM}/solvedb.macro`" \
- -qa --qf "[%{RECOMMENDS}\n]" | sort -u > ${IMAGE_ROOTFS}/install/recommend
- # Did we add more to the list?
- grep -v -x -F -f ${IMAGE_ROOTFS}/install/recommend.list ${IMAGE_ROOTFS}/install/recommend > ${IMAGE_ROOTFS}/install/recommend.new || true
- # We don't want to loop unless there is a change to the list!
- loop=0
- cat ${IMAGE_ROOTFS}/install/recommend.new | \
- while read pkg ; do
- # Ohh there was a new one, we'll need to loop again...
- loop=1
- echo "Processing $pkg..."
- pkg_name=$(resolve_package $pkg)
- if [ -z "$pkg_name" ]; then
- echo "Unable to find package $pkg." >> "${WORKDIR}/temp/log.do_rootfs_recommend.${PID}"
- continue
- fi
- echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_rootfs_recommend.${PID}"
- ${RPM} -D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${DEPLOY_DIR_RPM}/solvedb.macro`" \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync private" \
- -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
- $pkg_name >> "${WORKDIR}/temp/log.do_rootfs_recommend.${PID}" 2>&1 || true
- done
- cat ${IMAGE_ROOTFS}/install/recommend.list ${IMAGE_ROOTFS}/install/recommend.new | sort -u > ${IMAGE_ROOTFS}/install/recommend.new.list
- mv ${IMAGE_ROOTFS}/install/recommend.new.list ${IMAGE_ROOTFS}/install/recommend.list
- rm ${IMAGE_ROOTFS}/install/recommend ${IMAGE_ROOTFS}/install/recommend.new
- done
- fi
-
- # Now that we have a solution, pull out a list of what to install...
- echo "Manifest: ${IMAGE_ROOTFS}/install/install.manifest"
- ${RPM} -D "_dbpath ${IMAGE_ROOTFS}/install" -qa --yaml \
- | grep -i 'Packageorigin' | cut -d : -f 2 > ${IMAGE_ROOTFS}/install/install_solution.manifest
-
- # Attempt install
- ${RPM} --root ${IMAGE_ROOTFS} -D "_dbpath ${rpmlibdir}" \
- --noscripts --notriggers --noparentdirs --nolinktos \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync private" \
- -Uhv ${IMAGE_ROOTFS}/install/install_solution.manifest
+ package_install_internal_rpm
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
@@ -159,6 +68,7 @@ fakeroot rootfs_rpm_do_rootfs () {
mkdir -p ${IMAGE_ROOTFS}/etc/rpm-postinsts/
${RPM} --root ${IMAGE_ROOTFS} -D '_dbpath ${rpmlibdir}' -qa \
+ -D "__dbi_txn create nofsync private" \
--qf 'Name: %{NAME}\n%|POSTIN?{postinstall scriptlet%|POSTINPROG?{ (using %{POSTINPROG})}|:\n%{POSTIN}\n}:{%|POSTINPROG?{postinstall program: %{POSTINPROG}\n}|}|' \
> ${IMAGE_ROOTFS}/etc/rpm-postinsts/combined
awk -f ${AWKPOSTINSTSCRIPT} < ${IMAGE_ROOTFS}/etc/rpm-postinsts/combined
@@ -202,54 +112,25 @@ EOF
# remove lock files
rm -f ${IMAGE_ROOTFS}${rpmlibdir}/__db.*
- # remove resolver files and manifests
- rm -f ${IMAGE_ROOTFS}/install/install.manifest
+ # Move manifests into the directory with the logs
+ mv ${IMAGE_ROOTFS}/install/*.manifest ${T}/
+
+ # Remove all remaining resolver files
+ rm -rf ${IMAGE_ROOTFS}/install
log_check rootfs
# Workaround so the parser knows we need the resolve_package_rpm function!
if false ; then
- resolve_package foo || true
+ resolve_package_rpm foo ${RPMCONF_TARGET_BASE}.conf || true
fi
}
-rootfs_rpm_log_check() {
- target="$1"
- lf_path="$2"
-
- lf_txt="`cat $lf_path`"
- for keyword_die in "Cannot find package" "exit 1" ERR Fail
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- true
-}
-
remove_packaging_data_files() {
rm -rf ${IMAGE_ROOTFS}${rpmlibdir}
rm -rf ${IMAGE_ROOTFS}${opkglibdir}
}
-# Resolve package names to filepaths
-resolve_package() {
- pkg="$1"
- pkg_name=""
- for solve in `cat ${DEPLOY_DIR_RPM}/solvedb.conf`; do
- pkg_name=$(${RPM} -D "_dbpath $solve" -D "_dbi_tags_3 Packages:Name:Basenames:Providename:Nvra" -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync" -q --yaml $pkg | grep -i 'Packageorigin' | cut -d : -f 2)
- if [ -n "$pkg_name" ]; then
- break;
- fi
- done
- echo $pkg_name
-}
install_all_locales() {
PACKAGES_TO_INSTALL=""
@@ -257,7 +138,7 @@ install_all_locales() {
# Generate list of installed packages...
INSTALLED_PACKAGES=$( \
${RPM} --root ${IMAGE_ROOTFS} -D "_dbpath ${rpmlibdir}" \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync private" \
+ -D "__dbi_txn create nofsync private" \
-qa --qf "[%{NAME}\n]" | egrep -v -- "(-locale-|-dev$|-doc$|^kernel|^glibc|^ttf|^task|^perl|^python)" \
)
@@ -265,11 +146,11 @@ install_all_locales() {
# but this should be good enough for the few users of this function...
for pkg in $INSTALLED_PACKAGES; do
for lang in ${IMAGE_LOCALES}; do
- pkg_name=$(resolve_package $pkg-locale-$lang)
+ pkg_name=$(resolve_package_rpm $pkg-locale-$lang ${RPMCONF_TARGET_BASE}.conf)
if [ -n "$pkg_name" ]; then
${RPM} --root ${IMAGE_ROOTFS} -D "_dbpath ${rpmlibdir}" \
+ -D "__dbi_txn create nofsync private" \
--noscripts --notriggers --noparentdirs --nolinktos \
- -D "__dbi_cdb create mp_mmapsize=128Mb mp_size=1Mb nofsync private" \
-Uhv $pkg_name || true
fi
done
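
[Editor's note] For reference, the recommends handling that package_install_internal_rpm now absorbs is a fixed-point iteration: dump the RECOMMENDS of everything installed so far, install any names not yet seen, and repeat until the set stops growing. A minimal Python sketch of that loop, where dump_recommends and install_package are hypothetical stand-ins for the rpm queries shown above:

    # Hypothetical sketch; dump_recommends() returns the current set of
    # recommended package names, install_package() installs one of them.
    def install_recommends(dump_recommends, install_package):
        seen = set()
        while True:
            new = set(dump_recommends()) - seen
            if not new:
                break  # fixed point reached, nothing new was recommended
            for pkg in sorted(new):
                install_package(pkg)  # failures tolerated, as in the shell loop
            seen |= new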
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 9d183e352..13940f81d 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -124,6 +124,14 @@ def check_sanity(e):
if data.getVar('MACHINE', e.data, True):
if not check_conf_exists("conf/machine/${MACHINE}.conf", e.data):
messages = messages + 'Please set a valid MACHINE in your local.conf\n'
+
+ # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
+ # set, since so much relies on it being set.
+ dldir = data.getVar('DL_DIR', e.data, True)
+ if not dldir:
+ messages = messages + "DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable. \n"
+ if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
+ messages = messages + "DL_DIR: %s exists but you do not appear to have write access to it. \n" % dldir
# Check that the DISTRO is valid
# need to take into account DISTRO renaming DISTRO
@@ -155,6 +163,9 @@ def check_sanity(e):
if not check_app_exists("qemu-arm", e.data):
messages = messages + "qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH"
+ if "." in data.getVar('PATH', e.data, True).split(":"):
+ messages = messages + "PATH contains '.' which will break the build, please remove this"
+
if data.getVar('TARGET_ARCH', e.data, True) == "arm":
# This path is no longer user-readable in modern (very recent) Linux
try:
@@ -178,11 +189,12 @@ def check_sanity(e):
if not data.getVar( 'DISPLAY', e.data, True ) and data.getVar( 'IMAGETEST', e.data, True ) == 'qemu':
 messages = messages + 'qemuimagetest needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n'
- # Ensure we have the binary for TERMCMD, as when patch application fails the error is fairly intimidating
- termcmd = data.getVar("TERMCMD", e.data, True)
- term = termcmd.split()[0]
- if not check_app_exists(term, e.data):
- messages = messages + "The console for use in patch error resolution is not available, please install %s or set TERMCMD and TERMCMDRUN (as documented in local.conf).\n" % term
+ if data.getVar('PATCHRESOLVE', e.data, True) != 'noop':
+ # Ensure we have the binary for TERMCMD, as when patch application fails the error is fairly intimidating
+ termcmd = data.getVar("TERMCMD", e.data, True)
+ term = termcmd.split()[0]
+ if not check_app_exists(term, e.data):
+ messages = messages + "The console for use in patch error resolution is not available, please install %s or set TERMCMD and TERMCMDRUN (as documented in local.conf).\n" % term
if os.path.basename(os.readlink('/bin/sh')) == 'dash':
messages = messages + "Using dash as /bin/sh causes various subtle build problems, please use bash instead (e.g. 'dpkg-reconfigure dash' on an Ubuntu system.\n"
@@ -196,9 +208,6 @@ def check_sanity(e):
if not oes_bb_conf:
 messages = messages + 'You do not include OpenEmbedded\'s version of conf/bitbake.conf. This means your environment is misconfigured; in particular, check BBPATH.\n'
- if data.getVar('SDK_ARCH', e.data, True) == 'i686':
- messages = messages + '"Please set SDKMACHINE to i586. It is currently defaulting to the build machine architecture of i686 and this is known to have issues (see local.conf).\n'
-
nolibs = data.getVar('NO32LIBS', e.data, True)
if not nolibs:
lib32path = '/lib'
@@ -292,7 +301,7 @@ def check_sanity(e):
f.write(current_abi)
elif (abi != current_abi):
# Code to convert from one ABI to another could go here if possible.
- messages = messages + "Error, TMPDIR has changed ABI (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)
+ messages = messages + "Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)
else:
f = file(abifile, "w")
f.write(current_abi)
@@ -300,7 +309,7 @@ def check_sanity(e):
oeroot = data.getVar('POKYBASE', e.data)
if oeroot.find ('+') != -1:
- messages = messages + "Error, you have an invalid character (+) in your POKYBASE directory path. Please more Poky to a directory which doesn't include a +."
+ messages = messages + "Error, you have an invalid character (+) in your POKYBASE directory path. Please move Poky to a directory which doesn't include a +."
elif oeroot.find (' ') != -1:
messages = messages + "Error, you have a space in your POKYBASE directory path. Please move Poky to a directory which doesn't include a space."
@@ -309,9 +318,8 @@ def check_sanity(e):
addhandler check_sanity_eventhandler
python check_sanity_eventhandler() {
- from bb.event import Handled, NotHandled
if bb.event.getName(e) == "ConfigParsed" and bb.data.getVar("BB_WORKERCONTEXT", e.data, True) != "1":
check_sanity(e)
- return NotHandled
+ return
}
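
[Editor's note] The corrected DL_DIR check must not touch the filesystem when the variable is unset. A standalone sketch of the intended logic in plain Python:

    import os

    def check_dldir(dldir):
        # Unset, or set but unwritable, both yield a message; an unset
        # value must never be passed to os.path.exists().
        if not dldir:
            return "DL_DIR is not set.\n"
        if os.path.exists(dldir) and not os.access(dldir, os.W_OK):
            return "DL_DIR: %s exists but is not writable.\n" % dldir
        return ""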
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
new file mode 100644
index 000000000..ced9509df
--- /dev/null
+++ b/meta/classes/setuptools.bbclass
@@ -0,0 +1,8 @@
+inherit distutils
+
+DEPENDS += "python-setuptools-native"
+
+DISTUTILS_INSTALL_ARGS = "--root=${D} \
+ --single-version-externally-managed \
+ --prefix=${prefix} \
+ --install-data=${datadir}"
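
[Editor's note] A recipe for a setuptools-based Python module would normally only need to inherit this new class; the --single-version-externally-managed argument makes setuptools perform a flat, distutils-style install under ${D} rather than creating an egg directory.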
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
index 37d910e1d..0813c2543 100644
--- a/meta/classes/siteconfig.bbclass
+++ b/meta/classes/siteconfig.bbclass
@@ -10,18 +10,21 @@ python siteconfig_do_siteconfig () {
sstate_install(shared_state, d)
}
+EXTRASITECONFIG ?= ""
+
siteconfig_do_siteconfig_gencache () {
- mkdir -p ${WORKDIR}/site_config
+ mkdir -p ${WORKDIR}/site_config_${MACHINE}
gen-site-config ${FILE_DIRNAME}/site_config \
- >${WORKDIR}/site_config/configure.ac
- cd ${WORKDIR}/site_config
+ >${WORKDIR}/site_config_${MACHINE}/configure.ac
+ cd ${WORKDIR}/site_config_${MACHINE}
autoconf
- CONFIG_SITE="" ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
+ CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
-e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
< ${PN}_cache > ${PN}_config
- mkdir -p ${SYSROOT_DESTDIR}${SITECONFIG_SYSROOTCACHE}
- cp ${PN}_config ${SYSROOT_DESTDIR}${SITECONFIG_SYSROOTCACHE}
+ mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+ cp ${PN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+
}
do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
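
[Editor's note] The sed -n expression above keeps only the autoconf cache variables considered safe to share between builds. An equivalent filter as a small Python sketch (file names are placeholders):

    import re

    # Matches the same cache variables the sed expression prints.
    KEEP = re.compile(r"ac_cv_(c_bigendian|sizeof_|type_|header_|func_)")

    def filter_site_cache(src, dst):
        with open(src) as fin, open(dst, "w") as fout:
            for line in fin:
                if KEEP.search(line):
                    fout.write(line)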
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index 0ba130c88..e4564e4b0 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -1,11 +1,11 @@
-SSTATE_VERSION = "1"
+SSTATE_VERSION = "2"
SSTATE_MANIFESTS = "${TMPDIR}/sstate-control"
-SSTATE_MANFILEBASE = "${SSTATE_MANIFESTS}/manifest-${SSTATE_PKGARCH}-"
+SSTATE_MANFILEBASE = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-"
SSTATE_MANFILEPREFIX = "${SSTATE_MANFILEBASE}${PN}"
-SSTATE_PKGARCH = "${BASE_PACKAGE_ARCH}"
+SSTATE_PKGARCH = "${MULTIMACH_ARCH}"
SSTATE_PKGSPEC = "sstate-${PN}-${MULTIMACH_ARCH}${TARGET_VENDOR}-${TARGET_OS}-${PV}-${PR}-${SSTATE_PKGARCH}-${SSTATE_VERSION}-"
SSTATE_PKGNAME = "${SSTATE_PKGSPEC}${BB_TASKHASH}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
@@ -14,15 +14,22 @@ SSTATE_SCAN_CMD ?= "find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config"
BB_HASHFILENAME = "${SSTATE_PKGNAME}"
+SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
+
python () {
if bb.data.inherits_class('native', d):
bb.data.setVar('SSTATE_PKGARCH', bb.data.getVar('BUILD_ARCH', d), d)
- elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
+ elif bb.data.inherits_class('cross', d):
+ bb.data.setVar('SSTATE_PKGARCH', bb.data.expand("${BUILD_ARCH}_${BASE_PACKAGE_ARCH}", d), d)
+ bb.data.setVar('SSTATE_MANMACH', bb.data.expand("${BUILD_ARCH}_${MACHINE}", d), d)
+ elif bb.data.inherits_class('crosssdk', d):
bb.data.setVar('SSTATE_PKGARCH', bb.data.expand("${BUILD_ARCH}_${BASE_PACKAGE_ARCH}", d), d)
elif bb.data.inherits_class('nativesdk', d):
bb.data.setVar('SSTATE_PKGARCH', bb.data.expand("${SDK_ARCH}", d), d)
elif bb.data.inherits_class('cross-canadian', d):
bb.data.setVar('SSTATE_PKGARCH', bb.data.expand("${SDK_ARCH}_${BASE_PACKAGE_ARCH}", d), d)
+ else:
+ bb.data.setVar('SSTATE_MANMACH', bb.data.expand("${MACHINE}", d), d)
# These classes encode staging paths into their scripts data so can only be
# reused if we manipulate the paths
@@ -30,17 +37,21 @@ python () {
scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
bb.data.setVar('SSTATE_SCAN_CMD', scan_cmd, d)
+ namemap = []
for task in (bb.data.getVar('SSTATETASKS', d, True) or "").split():
+ namemap.append(bb.data.getVarFlag(task, 'sstate-name', d))
funcs = bb.data.getVarFlag(task, 'prefuncs', d) or ""
funcs = "sstate_task_prefunc " + funcs
bb.data.setVarFlag(task, 'prefuncs', funcs, d)
funcs = bb.data.getVarFlag(task, 'postfuncs', d) or ""
funcs = funcs + " sstate_task_postfunc"
bb.data.setVarFlag(task, 'postfuncs', funcs, d)
+ d.setVar('SSTATETASKNAMES', " ".join(namemap))
}
-def sstate_init(name, d):
+def sstate_init(name, task, d):
ss = {}
+ ss['task'] = task
ss['name'] = name
ss['dirs'] = []
ss['plaindirs'] = []
@@ -63,7 +74,7 @@ def sstate_state_fromvars(d, task = None):
if not name or len(inputs) != len(outputs):
bb.fatal("sstate variables not setup correctly?!")
- ss = sstate_init(name, d)
+ ss = sstate_init(name, task, d)
for i in range(len(inputs)):
sstate_add(ss, inputs[i], outputs[i], d)
ss['lockfiles'] = lockfiles
@@ -87,30 +98,6 @@ def sstate_install(ss, d):
if os.access(manifest, os.R_OK):
bb.fatal("Package already staged (%s)?!" % manifest)
- def checkmanifest(pn, task):
- return os.access(bb.data.expand("${SSTATE_MANFILEBASE}%s.%s" % (pn, task), d), os.R_OK)
-
- skipinst = False
- pn = d.getVar("PN", True)
- if pn == "gcc-cross-initial":
- if checkmanifest("gcc-cross", "populate-sysroot"):
- skipinst = True
- if checkmanifest("gcc-cross-intermediate", "populate-sysroot"):
- skipinst = True
- elif pn == "gcc-cross-intermediate":
- if checkmanifest("gcc-cross", "populate-sysroot"):
- skipinst = True
- elif pn == "glibc-initial":
- if checkmanifest("glibc", "populate-sysroot"):
- skipinst = True
- elif pn == "eglibc-initial":
- if checkmanifest("eglibc", "populate-sysroot"):
- skipinst = True
-
- if skipinst:
- bb.note("Not staging %s.%s as sysroot already contains better functionality" % (pn, ss['name']))
- return
-
locks = []
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
@@ -168,10 +155,14 @@ def sstate_installpkg(ss, d):
fixmefn = sstateinst + "fixmepath"
if os.path.isfile(fixmefn):
staging = bb.data.getVar('STAGING_DIR', d, True)
+ staging_target = bb.data.getVar('STAGING_DIR_TARGET', d, True)
+ staging_host = bb.data.getVar('STAGING_DIR_HOST', d, True)
fixmefd = open(fixmefn, "r")
fixmefiles = fixmefd.readlines()
fixmefd.close()
for file in fixmefiles:
+ os.system("sed -i -e s:FIXMESTAGINGDIRTARGET:%s:g %s" % (staging_target, sstateinst + file))
+ os.system("sed -i -e s:FIXMESTAGINGDIRHOST:%s:g %s" % (staging_host, sstateinst + file))
os.system("sed -i -e s:FIXMESTAGINGDIR:%s:g %s" % (staging, sstateinst + file))
for state in ss['dirs']:
@@ -206,9 +197,6 @@ def sstate_clean_cachefiles(d):
def sstate_clean_manifest(manifest, d):
import oe.path
- if not os.path.exists(manifest):
- return
-
mfile = open(manifest)
entries = mfile.readlines()
mfile.close()
@@ -232,9 +220,13 @@ def sstate_clean_manifest(manifest, d):
oe.path.remove(manifest)
def sstate_clean(ss, d):
+ import oe.path
manifest = bb.data.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['name'], d)
+ if not os.path.exists(manifest):
+ return
+
locks = []
for lock in ss['lockfiles']:
locks.append(bb.utils.lockfile(lock))
@@ -244,7 +236,8 @@ def sstate_clean(ss, d):
for lock in locks:
bb.utils.unlockfile(lock)
-SCENEFUNCS += "sstate_cleanall"
+ oe.path.remove(d.getVar("STAMP", True) + ".do_" + ss['task'] + "*")
+
CLEANFUNCS += "sstate_cleanall"
python sstate_cleanall() {
@@ -261,9 +254,45 @@ python sstate_cleanall() {
for manifest in (os.listdir(manifest_dir)):
if fnmatch.fnmatch(manifest, manifest_pattern):
- sstate_clean_manifest(manifest_dir + "/" + manifest, d)
+ name = manifest.replace(manifest_pattern[:-1], "")
+ namemap = d.getVar('SSTATETASKNAMES', True).split()
+ tasks = d.getVar('SSTATETASKS', True).split()
+ taskname = tasks[namemap.index(name)]
+ shared_state = sstate_state_fromvars(d, taskname[3:])
+ sstate_clean(shared_state, d)
}
+def sstate_hardcode_path(d):
+ # Need to remove hardcoded paths and fix these when we install the
+ # staging packages.
+ sstate_scan_cmd = bb.data.getVar('SSTATE_SCAN_CMD', d, True)
+ p = os.popen("%s" % sstate_scan_cmd)
+ file_list = p.read()
+
+ if file_list == "":
+ p.close()
+ return
+
+ staging = bb.data.getVar('STAGING_DIR', d, True)
+ staging_target = bb.data.getVar('STAGING_DIR_TARGET', d, True)
+ staging_host = bb.data.getVar('STAGING_DIR_HOST', d, True)
+ sstate_builddir = bb.data.getVar('SSTATE_BUILDDIR', d, True)
+
+ for i in file_list.split('\n'):
+ if not i:
+ continue
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
+ cmd = "sed -i -e s:%s:FIXMESTAGINGDIR:g %s" % (staging, i)
+ elif bb.data.inherits_class('cross', d):
+ cmd = "sed -i -e s:%s:FIXMESTAGINGDIRTARGET:g %s \
+ sed -i -e s:%s:FIXMESTAGINGDIR:g %s" % (staging_target, i, staging, i)
+ else:
+ cmd = "sed -i -e s:%s:FIXMESTAGINGDIRHOST:g %s" % (staging_host, i)
+
+ os.system(cmd)
+ os.system("echo %s | sed -e 's:%s::' >> %sfixmepath" % (i, sstate_builddir, sstate_builddir))
+ p.close()
+
def sstate_package(ss, d):
import oe.path
@@ -289,6 +318,7 @@ def sstate_package(ss, d):
bb.data.setVar('SSTATE_BUILDDIR', sstatebuild, d)
bb.data.setVar('SSTATE_PKG', sstatepkg, d)
+ sstate_hardcode_path(d)
bb.build.exec_func('sstate_create_package', d)
bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
@@ -296,37 +326,40 @@ def sstate_package(ss, d):
return
def pstaging_fetch(sstatepkg, d):
- import bb.fetch
-
- # only try and fetch if the user has configured a mirror
+ # Only try and fetch if the user has configured a mirror
mirrors = bb.data.getVar('SSTATE_MIRRORS', d, True)
- if mirrors:
- # Copy the data object and override DL_DIR and SRC_URI
- localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
+ if not mirrors:
+ return
- dldir = bb.data.expand("${SSTATE_DIR}", localdata)
- srcuri = "file://" + os.path.basename(sstatepkg)
+ import bb.fetch2
+ # Copy the data object and override DL_DIR and SRC_URI
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
- bb.mkdirhier(dldir)
+ dldir = bb.data.expand("${SSTATE_DIR}", localdata)
+ srcuri = "file://" + os.path.basename(sstatepkg)
- bb.data.setVar('DL_DIR', dldir, localdata)
- bb.data.setVar('PREMIRRORS', mirrors, localdata)
- bb.data.setVar('SRC_URI', srcuri, localdata)
+ bb.mkdirhier(dldir)
- # Try a fetch from the sstate mirror, if it fails just return and
- # we will build the package
- try:
- bb.fetch.init([srcuri], localdata)
- bb.fetch.go(localdata, [srcuri])
- # Need to optimise this, if using file:// urls, the fetcher just changes the local path
- # For now work around by symlinking
- localpath = bb.data.expand(bb.fetch.localpath(srcuri, localdata), localdata)
- if localpath != sstatepkg and os.path.exists(localpath):
- os.symlink(localpath, sstatepkg)
- except:
- pass
+ bb.data.setVar('DL_DIR', dldir, localdata)
+ bb.data.setVar('PREMIRRORS', mirrors, localdata)
+ bb.data.setVar('SRC_URI', srcuri, localdata)
+
+ # Try a fetch from the sstate mirror, if it fails just return and
+ # we will build the package
+ try:
+ fetcher = bb.fetch2.Fetch([srcuri], localdata)
+ fetcher.download()
+
+ # Need to optimise this, if using file:// urls, the fetcher just changes the local path
+ # For now work around by symlinking
+ localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
+ if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
+ os.symlink(localpath, sstatepkg)
+
+ except bb.fetch2.BBFetchException:
+ pass
def sstate_setscene(d):
shared_state = sstate_state_fromvars(d)
@@ -353,15 +386,13 @@ python sstate_task_postfunc () {
# set as SSTATE_BUILDDIR
#
sstate_create_package () {
- # Need to remove hardcoded paths and fix these when we install the
- # staging packages.
- for i in `${SSTATE_SCAN_CMD}` ; do \
- sed -i -e s:${STAGING_DIR}:FIXMESTAGINGDIR:g $i
- echo $i | sed -e 's:${SSTATE_BUILDDIR}::' >> ${SSTATE_BUILDDIR}fixmepath
- done
-
cd ${SSTATE_BUILDDIR}
- tar -cvzf ${SSTATE_PKG} *
+ # Need to handle empty directories
+ if [ "$(ls -A)" ]; then
+ tar -czf ${SSTATE_PKG} *
+ else
+ tar -cz --file=${SSTATE_PKG} --files-from=/dev/null
+ fi
cd ${WORKDIR}
rm -rf ${SSTATE_BUILDDIR}
@@ -384,6 +415,7 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
# This needs to go away, FIXME
mapping = {
"do_populate_sysroot" : "populate-sysroot",
+ "do_populate_lic" : "populate-lic",
"do_package_write_ipk" : "deploy-ipk",
"do_package_write_deb" : "deploy-deb",
"do_package_write_rpm" : "deploy-rpm",
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
index 843256545..a713734c3 100644
--- a/meta/classes/staging.bbclass
+++ b/meta/classes/staging.bbclass
@@ -25,19 +25,6 @@ sysroot_stage_libdir() {
src="$1"
dest="$2"
- olddir=`pwd`
- cd $src
- las=$(find . -name \*.la -type f)
- cd $olddir
- echo "Found la files: $las"
- for i in $las
- do
- sed -e 's/^installed=yes$/installed=no/' \
- -e '/^dependency_libs=/s,${WORKDIR}[[:alnum:]/\._+-]*/\([[:alnum:]\._+-]*\),${STAGING_LIBDIR}/\1,g' \
- -e "/^dependency_libs=/s,\([[:space:]']\)${libdir},\1${STAGING_LIBDIR},g" \
- -e "/^dependency_libs=/s,\([[:space:]']\)${base_libdir},\1${STAGING_DIR_HOST}${base_libdir},g" \
- -i $src/$i
- done
sysroot_stage_dir $src $dest
}
@@ -45,46 +32,45 @@ sysroot_stage_dirs() {
from="$1"
to="$2"
- sysroot_stage_dir $from${includedir} $to${STAGING_INCDIR}
+ sysroot_stage_dir $from${includedir} $to${includedir}
if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
- sysroot_stage_dir $from${bindir} $to${STAGING_DIR_HOST}${bindir}
- sysroot_stage_dir $from${sbindir} $to${STAGING_DIR_HOST}${sbindir}
- sysroot_stage_dir $from${base_bindir} $to${STAGING_DIR_HOST}${base_bindir}
- sysroot_stage_dir $from${base_sbindir} $to${STAGING_DIR_HOST}${base_sbindir}
- sysroot_stage_dir $from${libexecdir} $to${STAGING_DIR_HOST}${libexecdir}
- sysroot_stage_dir $from${sysconfdir} $to${STAGING_DIR_HOST}${sysconfdir}
- sysroot_stage_dir $from${localstatedir} $to${STAGING_DIR_HOST}${localstatedir}
+ sysroot_stage_dir $from${bindir} $to${bindir}
+ sysroot_stage_dir $from${sbindir} $to${sbindir}
+ sysroot_stage_dir $from${base_bindir} $to${base_bindir}
+ sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
+ sysroot_stage_dir $from${libexecdir} $to${libexecdir}
+ sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
+ sysroot_stage_dir $from${localstatedir} $to${localstatedir}
fi
if [ -d $from${libdir} ]
then
- sysroot_stage_libdir $from/${libdir} $to${STAGING_LIBDIR}
+ sysroot_stage_libdir $from/${libdir} $to${libdir}
fi
if [ -d $from${base_libdir} ]
then
- sysroot_stage_libdir $from${base_libdir} $to${STAGING_DIR_HOST}${base_libdir}
+ sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
fi
- sysroot_stage_dir $from${datadir} $to${STAGING_DATADIR}
+ sysroot_stage_dir $from${datadir} $to${datadir}
}
sysroot_stage_all() {
sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
}
-do_populate_sysroot[dirs] = "${STAGING_DIR_TARGET}/${bindir} ${STAGING_DIR_TARGET}/${libdir} \
- ${STAGING_DIR_TARGET}/${includedir} \
- ${STAGING_BINDIR_NATIVE} ${STAGING_LIBDIR_NATIVE} \
- ${STAGING_INCDIR_NATIVE} \
- ${STAGING_DATADIR} \
- ${SYSROOT_DESTDIR}${STAGING_DIR_TARGET} \
- ${S} ${B}"
+do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
-# Could be compile but populate_sysroot and do_install shouldn't run at the same time
addtask populate_sysroot after do_install
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
+# We clean out any existing sstate from the sysroot if we rerun configure
+python sysroot_cleansstate () {
+ ss = sstate_state_fromvars(d, "populate_sysroot")
+ sstate_clean(ss, d)
+}
+do_configure[prefuncs] += "sysroot_cleansstate"
python do_populate_sysroot () {
#
@@ -103,15 +89,15 @@ python do_populate_sysroot () {
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[sstate-name] = "populate-sysroot"
-do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR}"
-do_populate_sysroot[sstate-outputdirs] = "${TMPDIR}/sysroots"
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
+do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
python do_populate_sysroot_setscene () {
sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
-
python () {
if bb.data.getVar('do_stage', d, True) is not None:
bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with Poky" % bb.data.getVar("FILE", d, True))
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
index 28df0f950..1027c7cef 100644
--- a/meta/classes/tinderclient.bbclass
+++ b/meta/classes/tinderclient.bbclass
@@ -367,14 +367,14 @@ def tinder_do_tinder_report(event):
addhandler tinderclient_eventhandler
python tinderclient_eventhandler() {
from bb import note, error, data
- from bb.event import NotHandled, getName
+ from bb.event import getName
if e.data is None or getName(e) == "MsgNote":
- return NotHandled
+ return
do_tinder_report = data.getVar('TINDER_REPORT', e.data, True)
if do_tinder_report and do_tinder_report == "1":
tinder_do_tinder_report(e)
- return NotHandled
+ return
}
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
index c393d9964..b2165bc27 100644
--- a/meta/classes/toolchain-scripts.bbclass
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -49,8 +49,8 @@ toolchain_create_tree_env_script () {
echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${BUILD_SYS}"' >> $script
if [ "${TARGET_OS}" = "darwin8" ]; then
- echo 'export TARGET_CFLAGS="-I${STAGING_DIR}${TARGET_SYS}${includedir}"' >> $script
- echo 'export TARGET_LDFLAGS="-L${STAGING_DIR}${TARGET_SYS}${libdir}"' >> $script
+ echo 'export TARGET_CFLAGS="-I${STAGING_DIR}${MACHINE}${includedir}"' >> $script
+ echo 'export TARGET_LDFLAGS="-L${STAGING_DIR}${MACHINE}${libdir}"' >> $script
# Workaround darwin toolchain sysroot path problems
cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
ln -s /usr/local local
@@ -60,3 +60,33 @@ toolchain_create_tree_env_script () {
echo 'export POKY_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
echo 'export POKY_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
}
+
+# This function creates an environment-setup-script for use by the ADT installer
+toolchain_create_sdk_env_script_for_installer () {
+ # Create environment setup script
+ script=${SDK_OUTPUT}/${SDKPATH}/environment-setup-${OLD_MULTIMACH_TARGET_SYS}
+ rm -f $script
+ touch $script
+ echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${OLD_MULTIMACH_TARGET_SYS}:$PATH' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=##SDKTARGETSYSROOT##' >> $script
+ echo 'export PKG_CONFIG_PATH=##SDKTARGETSYSROOT##${target_libdir}/pkgconfig' >> $script
+ echo 'export CONFIG_SITE=${SDKPATH}/site-config-${OLD_MULTIMACH_TARGET_SYS}' >> $script
+ echo 'export CC=${TARGET_PREFIX}gcc' >> $script
+ echo 'export CXX=${TARGET_PREFIX}g++' >> $script
+ echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
+ echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
+ echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux"' >> $script
+ if [ "${TARGET_OS}" = "darwin8" ]; then
+ echo 'export TARGET_CFLAGS="-I##SDKTARGETSYSROOT##${target_includedir}"' >> $script
+ echo 'export TARGET_LDFLAGS="-L##SDKTARGETSYSROOT##${target_libdir}"' >> $script
+ # Workaround darwin toolchain sysroot path problems
+ cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
+ ln -s /usr/local local
+ fi
+ echo 'export CFLAGS="${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export CXXFLAGS="${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export POKY_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
+ echo 'export POKY_TARGET_SYSROOT="##SDKTARGETSYSROOT##"' >> $script
+ echo 'export POKY_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
+ echo 'export POKY_SDK_VERSION="${SDK_VERSION}"' >> $script
+}
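
[Editor's note] All location-dependent values in the installer variant are written as ##SDKTARGETSYSROOT## placeholders for the ADT installer to substitute once the final install path is known. A minimal Python sketch of that pattern (write_env_script is illustrative; the real work is done by the shell function above):

    def write_env_script(path, target_prefix):
        # Placeholders are left literal for later substitution.
        lines = [
            "export CC=%sgcc" % target_prefix,
            "export PKG_CONFIG_SYSROOT_DIR=##SDKTARGETSYSROOT##",
            'export CFLAGS="--sysroot=##SDKTARGETSYSROOT##"',
        ]
        with open(path, "w") as f:
            f.write("\n".join(lines) + "\n")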
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 57137c60b..7e4dda7cb 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,5 +1,9 @@
+UPDATERCPN ?= "${PN}"
+
DEPENDS_append = " update-rc.d-native"
-RDEPENDS_${PN}_append = " update-rc.d"
+UPDATERCD = "update-rc.d"
+UPDATERCD_virtclass-native = ""
+RDEPENDS_${UPDATERCPN}_append = " ${UPDATERCD}"
INITSCRIPT_PARAMS ?= "defaults"
@@ -69,7 +73,7 @@ python populate_packages_prepend () {
pkgs = bb.data.getVar('INITSCRIPT_PACKAGES', d, 1)
if pkgs == None:
- pkgs = bb.data.getVar('PN', d, 1)
+ pkgs = bb.data.getVar('UPDATERCPN', d, 1)
packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
if not pkgs in packages and packages != []:
pkgs = packages[0]
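
[Editor's note] The package selection in populate_packages_prepend now honours UPDATERCPN, falling back to the first listed package when the chosen name is not actually packaged. The same logic as a standalone sketch, with getvar as a hypothetical accessor:

    def initscript_packages(getvar):
        # Explicit INITSCRIPT_PACKAGES wins, then UPDATERCPN (which
        # defaults to PN), clamped to a package that really exists.
        pkgs = getvar('INITSCRIPT_PACKAGES') or getvar('UPDATERCPN')
        packages = (getvar('PACKAGES') or "").split()
        if pkgs not in packages and packages:
            pkgs = packages[0]
        return pkgs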
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
index db2297340..aeac2bce4 100644
--- a/meta/classes/utility-tasks.bbclass
+++ b/meta/classes/utility-tasks.bbclass
@@ -50,33 +50,18 @@ python do_rebuild() {
addtask checkuri
do_checkuri[nostamp] = "1"
python do_checkuri() {
- import sys
+ src_uri = (bb.data.getVar('SRC_URI', d, True) or "").split()
+ if len(src_uri) == 0:
+ return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
- src_uri = bb.data.getVar('SRC_URI', localdata, 1)
-
- try:
- bb.fetch.init(src_uri.split(),d)
- except bb.fetch.NoMethodError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("No method: %s" % value)
-
- try:
- bb.fetch.checkstatus(localdata)
- except bb.fetch.MissingParameterError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Missing parameters: %s" % value)
- except bb.fetch.FetchError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Fetch failed: %s" % value)
- except bb.fetch.MD5SumError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("MD5 failed: %s" % value)
- except:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Unknown fetch Error: %s" % value)
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.checkstatus()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
addtask checkuriall after do_checkuri
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
index 746f46ce5..455b49d54 100644
--- a/meta/classes/utils.bbclass
+++ b/meta/classes/utils.bbclass
@@ -51,11 +51,12 @@ def machine_paths(d):
def is_machine_specific(d):
"""Determine whether the current recipe is machine specific"""
machinepaths = set(machine_paths(d))
- urldatadict = bb.fetch.init(d.getVar("SRC_URI", True).split(), d, True)
- for urldata in (urldata for urldata in urldatadict.itervalues()
- if urldata.type == "file"):
- if any(urldata.localpath.startswith(mp + "/") for mp in machinepaths):
- return True
+ srcuri = d.getVar("SRC_URI", True).split()
+ for url in srcuri:
+ fetcher = bb.fetch2.Fetch([url], d)
+ if url.startswith("file://"):
+ if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
+ return True
def oe_popen_env(d):
env = d.getVar("__oe_popen_env", False)
@@ -199,19 +200,8 @@ oe_libinstall() {
__runcmd install -m 0644 $dota $destpath/
fi
if [ -f "$dotlai" -a -n "$libtool" ]; then
- if test -n "$staging_install"
- then
- # stop libtool using the final directory name for libraries
- # in staging:
- __runcmd rm -f $destpath/$libname.la
- __runcmd sed -e 's/^installed=yes$/installed=no/' \
- -e '/^dependency_libs=/s,${WORKDIR}[[:alnum:]/\._+-]*/\([[:alnum:]\._+-]*\),${STAGING_LIBDIR}/\1,g' \
- -e "/^dependency_libs=/s,\([[:space:]']\)${libdir},\1${STAGING_LIBDIR},g" \
- $dotlai >$destpath/$libname.la
- else
- rm -f $destpath/$libname.la
- __runcmd install -m 0644 $dotlai $destpath/$libname.la
- fi
+ rm -f $destpath/$libname.la
+ __runcmd install -m 0644 $dotlai $destpath/$libname.la
fi
for name in $library_names; do