From 4fa2d11bb1fdb58ff426114169583672fc3d65b8 Mon Sep 17 00:00:00 2001
From: Chris Larson
Date: Tue, 19 Sep 2006 09:04:09 +0000
Subject: Sync up.. all the deb/dpkg changes which I have locally are now in svn.

git-svn-id: https://svn.o-hand.com/repos/poky/trunk@728 311d38ba-8fff-0310-9ca6-ca027cbcb966
---
 meta/classes/image.bbclass        |  58 ++++++++++
 meta/classes/image_ipk.bbclass    |  80 -------------
 meta/classes/multimachine.bbclass |   2 +-
 meta/classes/package_deb.bbclass  | 236 ++++++++++++++++++++++++++++++++++++++
 meta/classes/package_ipk.bbclass  |   6 +-
 meta/classes/package_rpm.bbclass  |   1 +
 meta/classes/package_tar.bbclass  |   1 +
 meta/classes/rootfs_deb.bbclass   | 130 +++++++++++++++++++++
 meta/classes/rootfs_ipk.bbclass   |   7 +-
 9 files changed, 435 insertions(+), 86 deletions(-)
 create mode 100644 meta/classes/image.bbclass
 delete mode 100644 meta/classes/image_ipk.bbclass
 create mode 100644 meta/classes/package_deb.bbclass
 create mode 100644 meta/classes/rootfs_deb.bbclass

diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
new file mode 100644
index 000000000..e99526524
--- /dev/null
+++ b/meta/classes/image.bbclass
@@ -0,0 +1,58 @@
+inherit rootfs_${IMAGE_PKGTYPE}
+
+# We need to recursively follow RDEPENDS and RRECOMMENDS for images
+BUILD_ALL_DEPS = "1"
+do_rootfs[recrdeptask] = "do_package"
+
+# Images are generally built explicitly, do not need to be part of world.
+EXCLUDE_FROM_WORLD = "1"
+
+USE_DEVFS ?= "0"
+
+DEPENDS += "makedevs-native"
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+def get_image_deps(d):
+    import bb
+    str = ""
+    for type in (bb.data.getVar('IMAGE_FSTYPES', d, 1) or "").split():
+        deps = bb.data.getVar('IMAGE_DEPENDS_%s' % type, d) or ""
+        if deps:
+            str += " %s" % deps
+    return str
+
+DEPENDS += "${@get_image_deps(d)}"
+
+IMAGE_DEVICE_TABLE ?= "${@bb.which(bb.data.getVar('BBPATH', d, 1), 'files/device_table-minimal.txt')}"
+IMAGE_POSTPROCESS_COMMAND ?= ""
+
+# Must call real_do_rootfs() from inside here, rather than as a separate
+# task, so that we have a single fakeroot context for the whole process.
+fakeroot do_rootfs () {
+    set -x
+    rm -rf ${IMAGE_ROOTFS}
+
+    if [ "${USE_DEVFS}" != "1" ]; then
+        mkdir -p ${IMAGE_ROOTFS}/dev
+        makedevs -r ${IMAGE_ROOTFS} -D ${IMAGE_DEVICE_TABLE}
+    fi
+
+    rootfs_${IMAGE_PKGTYPE}_do_rootfs
+
+    ${IMAGE_PREPROCESS_COMMAND}
+
+    export TOPDIR=${TOPDIR}
+
+    for type in ${IMAGE_FSTYPES}; do
+        if test -z "$FAKEROOTKEY"; then
+            fakeroot -i ${TMPDIR}/fakedb.image bbimage -t $type -e ${FILE}
+        else
+            bbimage -n "${IMAGE_NAME}" -t "$type" -e "${FILE}"
+        fi
+
+        rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.*
+        ln -s ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
+    done
+
+    ${IMAGE_POSTPROCESS_COMMAND}
+}
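A minimal sketch of how the new image.bbclass is driven; the variable values below are illustrative only and are not set by this commit:

    IMAGE_FSTYPES = "jffs2 ext2.gz"
    IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
    IMAGE_DEPENDS_ext2.gz = "genext2fs-native"

With settings like these, get_image_deps() returns " mtd-utils-native genext2fs-native", which is appended to DEPENDS, and do_rootfs then loops over the two filesystem types, running bbimage once per type and refreshing the ${IMAGE_LINK_NAME} symlinks in ${DEPLOY_DIR_IMAGE}.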
diff --git a/meta/classes/image_ipk.bbclass b/meta/classes/image_ipk.bbclass
deleted file mode 100644
index d5f21c580..000000000
--- a/meta/classes/image_ipk.bbclass
+++ /dev/null
@@ -1,80 +0,0 @@
-inherit rootfs_ipk
-
-# We need to recursively follow RDEPENDS and RRECOMMENDS for images
-BUILD_ALL_DEPS = "1"
-do_rootfs[recrdeptask] = "do_package"
-
-# Images are generally built explicitly, do not need to be part of world.
-EXCLUDE_FROM_WORLD = "1"
-
-USE_DEVFS ?= "0"
-
-DEPENDS += "makedevs-native"
-PACKAGE_ARCH = "${MACHINE_ARCH}"
-
-def get_image_deps(d):
-    import bb
-    str = ""
-    for type in (bb.data.getVar('IMAGE_FSTYPES', d, 1) or "").split():
-        deps = bb.data.getVar('IMAGE_DEPENDS_%s' % type, d) or ""
-        if deps:
-            str += " %s" % deps
-    return str
-
-DEPENDS += "${@get_image_deps(d)}"
-
-IMAGE_DEVICE_TABLE ?= "${@bb.which(bb.data.getVar('BBPATH', d, 1), 'files/device_table-minimal.txt')}"
-IMAGE_POSTPROCESS_COMMAND ?= ""
-
-# Must call real_do_rootfs() from inside here, rather than as a separate
-# task, so that we have a single fakeroot context for the whole process.
-fakeroot do_rootfs () {
-    set -x
-    rm -rf ${IMAGE_ROOTFS}
-
-    if [ "${USE_DEVFS}" != "1" ]; then
-        mkdir -p ${IMAGE_ROOTFS}/dev
-        makedevs -r ${IMAGE_ROOTFS} -D ${IMAGE_DEVICE_TABLE}
-    fi
-
-    real_do_rootfs
-
-    insert_feed_uris
-
-    rm -f ${IMAGE_ROOTFS}${libdir}/ipkg/lists/oe
-
-    ${IMAGE_PREPROCESS_COMMAND}
-
-    export TOPDIR=${TOPDIR}
-
-    for type in ${IMAGE_FSTYPES}; do
-        if test -z "$FAKEROOTKEY"; then
-            fakeroot -i ${TMPDIR}/fakedb.image bbimage -t $type -e ${FILE}
-        else
-            bbimage -n "${IMAGE_NAME}" -t "$type" -e "${FILE}"
-        fi
-
-        rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.*
-        ln -s ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
-    done
-
-    ${IMAGE_POSTPROCESS_COMMAND}
-}
-
-insert_feed_uris () {
-
-    echo "Building feeds for [${DISTRO}].."
-
-    for line in ${FEED_URIS}
-    do
-        # strip leading and trailing spaces/tabs, then split into name and uri
-        line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
-        feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
-        feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
-
-        echo "Added $feed_name feed with URL $feed_uri"
-
-        # insert new feed-sources
-        echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/ipkg/${feed_name}-feed.conf
-    done
-}
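For reference, the insert_feed_uris helper removed above parsed FEED_URIS entries of the form name##uri. An illustrative value (not something this commit sets) such as

    FEED_URIS = "oe##http://example.com/feeds/oe"

would have written "src/gz oe http://example.com/feeds/oe" into ${IMAGE_ROOTFS}/etc/ipkg/oe-feed.conf.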
diff --git a/meta/classes/multimachine.bbclass b/meta/classes/multimachine.bbclass
index 01dec648c..4359d6c66 100644
--- a/meta/classes/multimachine.bbclass
+++ b/meta/classes/multimachine.bbclass
@@ -14,7 +14,7 @@ python __anonymous () {
 
     # We could look for != PACKAGE_ARCH here but how to choose
    # if multiple differences are present?
-    # Look through IPKG_ARCHS for the priority order?
+    # Look through PACKAGE_ARCHS for the priority order?
     if pkgarch and pkgarch == macharch:
         multiarch = macharch
 
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
new file mode 100644
index 000000000..4526ac156
--- /dev/null
+++ b/meta/classes/package_deb.bbclass
@@ -0,0 +1,236 @@
+inherit package
+DEPENDS_prepend="${@["dpkg-native ", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
+BOOTSTRAP_EXTRA_RDEPENDS += "dpkg"
+DISTRO_EXTRA_RDEPENDS += "dpkg"
+PACKAGEFUNCS += "do_package_deb"
+IMAGE_PKGTYPE ?= "deb"
+
+python package_deb_fn () {
+    from bb import data
+    bb.data.setVar('PKGFN', bb.data.getVar('PKG',d), d)
+}
+
+addtask package_deb_install
+python do_package_deb_install () {
+    import os, sys, commands
+    pkg = bb.data.getVar('PKG', d, 1)
+    pkgfn = bb.data.getVar('PKGFN', d, 1)
+    rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
+    debdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
+    stagingdir = bb.data.getVar('STAGING_DIR', d, 1)
+    stagingbindir = bb.data.getVar('STAGING_BINDIR', d, 1)
+    tmpdir = bb.data.getVar('TMPDIR', d, 1)
+
+    if None in (pkg,pkgfn,rootfs):
+        raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGE_ROOTFS)")
+    try:
+        if not os.path.exists(rootfs):
+            os.makedirs(rootfs)
+        os.chdir(rootfs)
+    except OSError:
+        raise bb.build.FuncFailed(str(sys.exc_value))
+
+    # update packages file
+    (exitstatus, output) = commands.getstatusoutput('dpkg-scanpackages %s > %s/Packages' % (debdir, debdir))
+    if (exitstatus != 0 ):
+        raise bb.build.FuncFailed(output)
+
+    f = open(os.path.join(os.path.join(tmpdir, "stamps"), "do_packages"), "w")
+    f.close()
+
+    # NOTE: this env stuff is racy at best, we need something more capable
+    # than 'commands' for command execution, which includes manipulating the
+    # env of the fork+execve'd process
+
+    # Set up environment
+    apt_config = os.getenv('APT_CONFIG')
+    os.putenv('APT_CONFIG', os.path.join(stagingdir, 'etc', 'apt', 'apt.conf'))
+    path = os.getenv('PATH')
+    os.putenv('PATH', '%s:%s' % (stagingbindir, os.getenv('PATH')))
+
+    # install package
+    commands.getstatusoutput('apt-get update')
+    commands.getstatusoutput('apt-get install -y %s' % pkgfn)
+
+    # revert environment
+    os.putenv('APT_CONFIG', apt_config)
+    os.putenv('PATH', path)
+}
+
+python do_package_deb () {
+    import copy # to back up env data
+    import sys
+    import re
+
+    workdir = bb.data.getVar('WORKDIR', d, 1)
+    if not workdir:
+        bb.error("WORKDIR not defined, unable to package")
+        return
+
+    import os # path manipulations
+    outdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
+    if not outdir:
+        bb.error("DEPLOY_DIR_DEB not defined, unable to package")
+        return
+
+    dvar = bb.data.getVar('D', d, 1)
+    if not dvar:
+        bb.error("D not defined, unable to package")
+        return
+    bb.mkdirhier(dvar)
+
+    packages = bb.data.getVar('PACKAGES', d, 1)
+    if not packages:
+        bb.debug(1, "PACKAGES not defined, nothing to package")
+        return
+
+    tmpdir = bb.data.getVar('TMPDIR', d, 1)
+    # Invalidate the packages file
+    if os.access(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"),os.R_OK):
+        os.unlink(os.path.join(os.path.join(tmpdir, "stamps"),"do_packages"))
+
+    if packages == []:
+        bb.debug(1, "No packages; nothing to do")
+        return
+
+    for pkg in packages.split():
+        localdata = bb.data.createCopy(d)
+        root = "%s/install/%s" % (workdir, pkg)
+
+        bb.data.setVar('ROOT', '', localdata)
+        bb.data.setVar('ROOT_%s' % pkg, root, localdata)
+        pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+        if not pkgname:
+            pkgname = pkg
+        bb.data.setVar('PKG', pkgname, localdata)
+
+        overrides = bb.data.getVar('OVERRIDES', localdata)
+        if not overrides:
+            raise bb.build.FuncFailed('OVERRIDES not defined')
+        overrides = bb.data.expand(overrides, localdata)
+        bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+
+        bb.data.update_data(localdata)
+        basedir = os.path.join(os.path.dirname(root))
+
+        pkgoutdir = os.path.join(outdir, bb.data.getVar('PACKAGE_ARCH', localdata, 1))
+        bb.mkdirhier(pkgoutdir)
+
+        os.chdir(root)
+        from glob import glob
+        g = glob('*')
+        try:
+            del g[g.index('DEBIAN')]
+            del g[g.index('./DEBIAN')]
+        except ValueError:
+            pass
+        if not g and not bb.data.getVar('ALLOW_EMPTY', localdata):
+            from bb import note
+            note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+            continue
+        controldir = os.path.join(root, 'DEBIAN')
+        bb.mkdirhier(controldir)
+        try:
+            ctrlfile = file(os.path.join(controldir, 'control'), 'wb')
+            # import codecs
+            # ctrlfile = codecs.open("someFile", "w", "utf-8")
+        except OSError:
+            raise bb.build.FuncFailed("unable to open control file for writing.")
+
+        fields = []
+        fields.append(["Version: %s-%s\n", ['PV', 'PR']])
+        fields.append(["Description: %s\n", ['DESCRIPTION']])
+        fields.append(["Section: %s\n", ['SECTION']])
+        fields.append(["Priority: %s\n", ['PRIORITY']])
+        fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+        fields.append(["Architecture: %s\n", ['TARGET_ARCH']])
+        fields.append(["OE: %s\n", ['P']])
+        fields.append(["Homepage: %s\n", ['HOMEPAGE']])
+
+# Package, Version, Maintainer, Description - mandatory
+# Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
+
+
+        def pullData(l, d):
+            l2 = []
+            for i in l:
+                l2.append(bb.data.getVar(i, d, 1))
+            return l2
+
+        ctrlfile.write("Package: %s\n" % pkgname)
+        # check for required fields
+        try:
+            for (c, fs) in fields:
+                for f in fs:
+                    if bb.data.getVar(f, localdata) is None:
+                        raise KeyError(f)
+                ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
+        except KeyError:
+            (type, value, traceback) = sys.exc_info()
+            ctrlfile.close()
+            raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
+        # more fields
+
+        bb.build.exec_func("mapping_rename_hook", localdata)
+
+        rdepends = explode_deps(unicode(bb.data.getVar("RDEPENDS", localdata, 1) or ""))
+        rdepends = [dep for dep in rdepends if not '*' in dep]
+        rrecommends = explode_deps(unicode(bb.data.getVar("RRECOMMENDS", localdata, 1) or ""))
+        rrecommends = [rec for rec in rrecommends if not '*' in rec]
+        rsuggests = (unicode(bb.data.getVar("RSUGGESTS", localdata, 1) or "")).split()
+        rprovides = (unicode(bb.data.getVar("RPROVIDES", localdata, 1) or "")).split()
+        rreplaces = (unicode(bb.data.getVar("RREPLACES", localdata, 1) or "")).split()
+        rconflicts = (unicode(bb.data.getVar("RCONFLICTS", localdata, 1) or "")).split()
+        if rdepends:
+            ctrlfile.write(u"Depends: %s\n" % ", ".join(rdepends))
+        if rsuggests:
+            ctrlfile.write(u"Suggests: %s\n" % ", ".join(rsuggests))
+        if rrecommends:
+            ctrlfile.write(u"Recommends: %s\n" % ", ".join(rrecommends))
+        if rprovides:
+            ctrlfile.write(u"Provides: %s\n" % ", ".join(rprovides))
+        if rreplaces:
+            ctrlfile.write(u"Replaces: %s\n" % ", ".join(rreplaces))
+        if rconflicts:
+            ctrlfile.write(u"Conflicts: %s\n" % ", ".join(rconflicts))
+        ctrlfile.close()
+
+        for script in ["preinst", "postinst", "prerm", "postrm"]:
+            scriptvar = bb.data.getVar('pkg_%s' % script, localdata, 1)
+            if not scriptvar:
+                continue
+            try:
+                scriptfile = file(os.path.join(controldir, script), 'w')
+            except OSError:
+                raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
+            scriptfile.write(scriptvar)
+            scriptfile.close()
+            os.chmod(os.path.join(controldir, script), 0755)
+
+        conffiles_str = bb.data.getVar("CONFFILES", localdata, 1)
+        if conffiles_str:
+            try:
+                conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
+            except OSError:
+                raise bb.build.FuncFailed("unable to open conffiles for writing.")
+            for f in conffiles_str.split():
+                conffiles.write('%s\n' % f)
+            conffiles.close()
+
+        os.chdir(basedir)
+        ret = os.system("PATH=\"%s\" dpkg-deb -b %s %s" % (bb.data.getVar("PATH", localdata, 1), root, pkgoutdir))
+        if ret != 0:
+            raise bb.build.FuncFailed("dpkg-deb execution failed")
+
+        for script in ["preinst", "postinst", "prerm", "postrm", "control" ]:
+            scriptfile = os.path.join(controldir, script)
+            try:
+                os.remove(scriptfile)
+            except OSError:
+                pass
+        try:
+            os.rmdir(controldir)
+        except OSError:
+            pass
+        del localdata
+}
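As a sketch of what do_package_deb emits, the DEBIAN/control file written for a hypothetical package (name and field values below are examples, not anything defined by this commit) would look like:

    Package: example-pkg
    Version: 1.0-r0
    Description: example package
    Section: console/utils
    Priority: optional
    Maintainer: OE Team <oe@example.com>
    Architecture: arm
    OE: example-pkg-1.0
    Homepage: http://example.com
    Depends: libc6

dpkg-deb -b then builds the .deb from the package's install root and drops it into ${DEPLOY_DIR_DEB}/${PACKAGE_ARCH}.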
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 9ae526bb3..2847cee6e 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -1,7 +1,9 @@
 inherit package
 DEPENDS_prepend="${@["ipkg-utils-native ", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
 BOOTSTRAP_EXTRA_RDEPENDS += "ipkg-collateral ipkg ipkg-link"
+DISTRO_EXTRA_RDEPENDS += "ipkg-collateral ipkg ipkg-link"
 PACKAGEFUNCS += "do_package_ipk"
+IMAGE_PKGTYPE ?= "ipk"
 
 python package_ipk_fn () {
     from bb import data
@@ -30,9 +32,9 @@ python package_ipk_install () {
     # Generate ipk.conf if it or the stamp doesnt exist
     conffile = os.path.join(stagingdir,"ipkg.conf")
     if not os.access(conffile, os.R_OK):
-        ipkg_archs = bb.data.getVar('IPKG_ARCHS',d)
+        ipkg_archs = bb.data.getVar('PACKAGE_ARCHS',d)
         if ipkg_archs is None:
-            bb.error("IPKG_ARCHS missing")
+            bb.error("PACKAGE_ARCHS missing")
             raise FuncFailed
         ipkg_archs = ipkg_archs.split()
         arch_priority = 1
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index c29ab5f42..ee579bed5 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -3,6 +3,7 @@ inherit rpm_core
 RPMBUILD="rpmbuild --short-circuit ${RPMOPTS}"
 
 PACKAGEFUNCS += "do_package_rpm"
+IMAGE_PKGTYPE ?= "rpm"
 
 python write_specfile() {
     from bb import data, build
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
index 359e35f11..d8c7919c3 100644
--- a/meta/classes/package_tar.bbclass
+++ b/meta/classes/package_tar.bbclass
@@ -1,6 +1,7 @@
 inherit package
 
 PACKAGEFUNCS += "do_package_tar"
+IMAGE_PKGTYPE ?= "tar"
 
 python package_tar_fn () {
     import os
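The IMAGE_PKGTYPE defaults added to the packaging classes above let the first packaging class a configuration inherits pick the rootfs backend. For example (illustrative only, not a change made by this commit), a distro or local configuration containing

    INHERIT += "package_deb"

ends up with IMAGE_PKGTYPE = "deb", so image.bbclass resolves inherit rootfs_${IMAGE_PKGTYPE} to rootfs_deb; with package_ipk inherited first it resolves to rootfs_ipk instead.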
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
new file mode 100644
index 000000000..7712911b0
--- /dev/null
+++ b/meta/classes/rootfs_deb.bbclass
@@ -0,0 +1,130 @@
+DEPENDS_prepend = "dpkg-native apt-native fakeroot-native "
+DEPENDS_append = " ${EXTRA_IMAGEDEPENDS}"
+
+PACKAGES = ""
+
+do_rootfs[nostamp] = 1
+do_rootfs[dirs] = ${TOPDIR}
+do_build[nostamp] = 1
+
+ROOTFS_POSTPROCESS_COMMAND ?= ""
+
+PID = "${@os.getpid()}"
+
+# some default locales
+IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
+
+LINGUAS_INSTALL = "${@" ".join(map(lambda s: "locale-base-%s" % s, bb.data.getVar('IMAGE_LINGUAS', d, 1).split()))}"
+
+fakeroot rootfs_deb_do_rootfs () {
+    set +e
+    mkdir -p ${IMAGE_ROOTFS}/var/dpkg/{info,updates}
+
+    rm -f ${STAGING_DIR}/etc/apt/sources.list
+    rm -f ${STAGING_DIR}/etc/apt/preferences
+    > ${IMAGE_ROOTFS}/var/dpkg/status
+    > ${IMAGE_ROOTFS}/var/dpkg/available
+    # > ${STAGING_DIR}/var/dpkg/status
+
+    priority=1
+    for arch in ${PACKAGE_ARCHS}; do
+        if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+            continue;
+        fi
+        cd ${DEPLOY_DIR_DEB}/$arch
+        # if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+        rm -f Packages.gz Packages Packages.bz2
+        # fi
+        apt-ftparchive packages . | bzip2 > Packages.bz2
+
+        echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_DIR}/etc/apt/sources.list
+        (echo "Package: *"
+         echo "Pin: origin ${DEPLOY_DIR_DEB}/$arch"
+         echo "Pin-Priority: $((800 + $priority))") >> ${STAGING_DIR}/etc/apt/preferences
+        priority=$(expr $priority + 5)
+    done
+
+    export APT_CONFIG="${STAGING_DIR}/etc/apt/apt.conf"
+    export D=${IMAGE_ROOTFS}
+    export OFFLINE_ROOT=${IMAGE_ROOTFS}
+    export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
+
+    apt-get update
+
+    _flag () {
+        sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/dpkg/status
+    }
+    _getflag () {
+        cat ${IMAGE_ROOTFS}/var/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
+    }
+
+    if [ ! -z "${LINGUAS_INSTALL}" ]; then
+        apt-get install glibc-localedata-i18n
+        if [ $? -eq 1 ]; then
+            exit 1
+        fi
+        for i in ${LINGUAS_INSTALL}; do
+            apt-get install $i
+            if [ $? -eq 1 ]; then
+                exit 1
+            fi
+        done
+    fi
+
+    if [ ! -z "${PACKAGE_INSTALL}" ]; then
+        for i in ${PACKAGE_INSTALL}; do
+            apt-get install $i
+            if [ $? -eq 1 ]; then
+                exit 1
+            fi
+            find ${IMAGE_ROOTFS} -name \*.dpkg-new | for i in `cat`; do
+                mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
+            done
+        done
+    fi
+
+    install -d ${IMAGE_ROOTFS}/${sysconfdir}
+    echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
+
+    for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.preinst; do
+        if [ -f $i ] && ! sh $i; then
+            _flag unpacked `basename $i .preinst`
+        fi
+    done
+
+    for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.postinst; do
+        if [ -f $i ] && ! sh $i configure; then
+            _flag unpacked `basename $i .postinst`
+        fi
+    done
+
+    set -e
+
+    ${ROOTFS_POSTPROCESS_COMMAND}
+}
+
+# set '*' as the rootpassword so the images
+# can decide if they want it or not
+
+zap_root_password () {
+    sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
+    mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
+}
+
+create_etc_timestamp() {
+    date +%2m%2d%2H%2M%Y >${IMAGE_ROOTFS}/etc/timestamp
+}
+
+# Turn any symbolic /sbin/init link into a file
+remove_init_link () {
+    if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
+        LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
+        rm ${IMAGE_ROOTFS}/sbin/init
+        cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
+    fi
+}
+
+# export the zap_root_password, create_etc_timestamp and remove_init_link
+EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link do_rootfs
+
+addtask rootfs before do_build after do_install
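To illustrate the apt inputs rootfs_deb_do_rootfs writes before running apt-get (the deploy path and architecture name below are examples only), the generated files look roughly like:

    # ${STAGING_DIR}/etc/apt/sources.list
    deb file:/path/to/tmp/deploy/deb/armv5te/ ./

    # ${STAGING_DIR}/etc/apt/preferences
    Package: *
    Pin: origin /path/to/tmp/deploy/deb/armv5te
    Pin-Priority: 801

with the pin priority starting at 801 and stepping up by 5 for each further architecture listed in PACKAGE_ARCHS.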
-z "${PACKAGE_INSTALL}" ]; then + ipkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL} fi export D=${IMAGE_ROOTFS} + export OFFLINE_ROOT=${IMAGE_ROOTFS} export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS} mkdir -p ${IMAGE_ROOTFS}/etc/ipkg/ grep "^arch" ${T}/ipkg.conf >${IMAGE_ROOTFS}/etc/ipkg/arch.conf -- cgit v1.2.3