Diffstat (limited to 'meta/classes')
-rw-r--r--  meta/classes/allarch.bbclass | 22
-rw-r--r--  meta/classes/archive-configured-source.bbclass | 14
-rw-r--r--  meta/classes/archive-original-source.bbclass | 14
-rw-r--r--  meta/classes/archive-patched-source.bbclass | 14
-rw-r--r--  meta/classes/archiver.bbclass | 452
-rw-r--r--  meta/classes/autotools.bbclass | 223
-rw-r--r--  meta/classes/autotools_stage.bbclass | 3
-rw-r--r--  meta/classes/base.bbclass | 1389
-rw-r--r--  meta/classes/base_srpm.bbclass | 20
-rw-r--r--  meta/classes/binconfig.bbclass | 47
-rw-r--r--  meta/classes/blacklist.bbclass | 20
-rw-r--r--  meta/classes/boot-directdisk.bbclass | 100
-rw-r--r--  meta/classes/bootimg.bbclass | 220
-rw-r--r--  meta/classes/bugzilla.bbclass | 186
-rw-r--r--  meta/classes/buildhistory.bbclass | 426
-rw-r--r--  meta/classes/buildstats.bbclass | 281
-rw-r--r--  meta/classes/ccache.inc | 11
-rw-r--r--  meta/classes/ccdv.bbclass | 21
-rw-r--r--  meta/classes/cmake.bbclass | 107
-rw-r--r--  meta/classes/cml1.bbclass | 9
-rw-r--r--  meta/classes/copyleft_compliance.bbclass | 104
-rw-r--r--  meta/classes/core-image.bbclass | 72
-rw-r--r--  meta/classes/cpan-base.bbclass | 31
-rw-r--r--  meta/classes/cpan.bbclass | 65
-rw-r--r--  meta/classes/cpan_build.bbclass | 37
-rw-r--r--  meta/classes/cross-canadian.bbclass | 89
-rw-r--r--  meta/classes/cross.bbclass | 82
-rw-r--r--  meta/classes/crosssdk.bbclass | 30
-rw-r--r--  meta/classes/debian.bbclass | 48
-rw-r--r--  meta/classes/deploy.bbclass | 11
-rw-r--r--  meta/classes/devshell.bbclass | 23
-rw-r--r--  meta/classes/distrodata.bbclass | 763
-rw-r--r--  meta/classes/distutils-base.bbclass | 19
-rw-r--r--  meta/classes/distutils-common-base.bbclass | 21
-rw-r--r--  meta/classes/distutils-native-base.bbclass | 3
-rw-r--r--  meta/classes/distutils.bbclass | 61
-rw-r--r--  meta/classes/dummy.bbclass | 2
-rw-r--r--  meta/classes/externalsrc.bbclass | 53
-rw-r--r--  meta/classes/flow-lossage.bbclass | 5
-rw-r--r--  meta/classes/gconf.bbclass | 30
-rw-r--r--  meta/classes/gettext.bbclass | 31
-rw-r--r--  meta/classes/gnome.bbclass | 21
-rw-r--r--  meta/classes/gnomebase.bbclass | 30
-rw-r--r--  meta/classes/grub-efi.bbclass | 116
-rw-r--r--  meta/classes/gtk-icon-cache.bbclass | 43
-rw-r--r--  meta/classes/gzipnative.bbclass | 3
-rw-r--r--  meta/classes/icecc.bbclass | 396
-rw-r--r--  meta/classes/image-empty.bbclass | 0
-rw-r--r--  meta/classes/image-live.bbclass | 15
-rw-r--r--  meta/classes/image-mklibs.bbclass | 72
-rw-r--r--  meta/classes/image-prelink.bbclass | 35
-rw-r--r--  meta/classes/image-swab.bbclass | 98
-rw-r--r--  meta/classes/image-vmdk.bbclass | 34
-rw-r--r--  meta/classes/image.bbclass | 275
-rw-r--r--  meta/classes/image_types.bbclass | 232
-rw-r--r--  meta/classes/image_types_uboot.bbclass | 23
-rw-r--r--  meta/classes/imagetest-dummy.bbclass | 1
-rw-r--r--  meta/classes/imagetest-qemu.bbclass | 223
-rw-r--r--  meta/classes/insane.bbclass | 809
-rw-r--r--  meta/classes/insserv.bbclass | 2
-rw-r--r--  meta/classes/kernel-arch.bbclass | 37
-rw-r--r--  meta/classes/kernel-yocto.bbclass | 294
-rw-r--r--  meta/classes/kernel.bbclass | 461
-rw-r--r--  meta/classes/lib_package.bbclass | 5
-rw-r--r--  meta/classes/libc-common.bbclass | 35
-rw-r--r--  meta/classes/libc-package.bbclass | 384
-rw-r--r--  meta/classes/license.bbclass | 390
-rw-r--r--  meta/classes/linux-kernel-base.bbclass | 14
-rw-r--r--  meta/classes/logging.bbclass | 72
-rw-r--r--  meta/classes/metadata_scm.bbclass | 77
-rw-r--r--  meta/classes/mime.bbclass | 60
-rw-r--r--  meta/classes/mirrors.bbclass | 66
-rw-r--r--  meta/classes/module-base.bbclass | 7
-rw-r--r--  meta/classes/module.bbclass | 27
-rw-r--r--  meta/classes/module_strip.bbclass | 22
-rw-r--r--  meta/classes/mozilla.bbclass | 53
-rw-r--r--  meta/classes/multilib.bbclass | 99
-rw-r--r--  meta/classes/multilib_global.bbclass | 38
-rw-r--r--  meta/classes/multilib_header.bbclass | 29
-rw-r--r--  meta/classes/native.bbclass | 168
-rw-r--r--  meta/classes/nativesdk.bbclass | 111
-rw-r--r--  meta/classes/openmoko-base.bbclass | 20
-rw-r--r--  meta/classes/openmoko-panel-plugin.bbclass | 6
-rw-r--r--  meta/classes/openmoko.bbclass | 3
-rw-r--r--  meta/classes/openmoko2.bbclass | 33
-rw-r--r--  meta/classes/own-mirrors.bbclass | 12
-rw-r--r--  meta/classes/package.bbclass | 1323
-rw-r--r--  meta/classes/package_deb.bbclass | 313
-rw-r--r--  meta/classes/package_ipk.bbclass | 352
-rw-r--r--  meta/classes/package_rpm.bbclass | 1237
-rw-r--r--  meta/classes/package_tar.bbclass | 55
-rw-r--r--  meta/classes/packaged-staging.bbclass | 440
-rw-r--r--  meta/classes/packagedata.bbclass | 83
-rw-r--r--  meta/classes/packageinfo.bbclass | 46
-rw-r--r--  meta/classes/patch.bbclass | 571
-rw-r--r--  meta/classes/patcher.bbclass | 7
-rw-r--r--  meta/classes/perlnative.bbclass | 3
-rw-r--r--  meta/classes/pkg_distribute.bbclass | 2
-rw-r--r--  meta/classes/pkg_metainfo.bbclass | 12
-rw-r--r--  meta/classes/pkgconfig.bbclass | 14
-rw-r--r--  meta/classes/poky-autobuild-notifier.bbclass | 62
-rw-r--r--  meta/classes/poky-image.bbclass | 97
-rw-r--r--  meta/classes/poky.bbclass | 5
-rw-r--r--  meta/classes/populate_sdk.bbclass | 89
-rw-r--r--  meta/classes/populate_sdk_deb.bbclass | 61
-rw-r--r--  meta/classes/populate_sdk_ipk.bbclass | 49
-rw-r--r--  meta/classes/populate_sdk_rpm.bbclass | 132
-rw-r--r--  meta/classes/prexport.bbclass | 47
-rw-r--r--  meta/classes/primport.bbclass | 20
-rw-r--r--  meta/classes/prserv.bbclass | 21
-rw-r--r--  meta/classes/python-dir.bbclass | 3
-rw-r--r--  meta/classes/qemu.bbclass | 15
-rw-r--r--  meta/classes/qmake2.bbclass | 14
-rw-r--r--  meta/classes/qmake_base.bbclass | 32
-rw-r--r--  meta/classes/qt4e.bbclass | 19
-rw-r--r--  meta/classes/qt4x11.bbclass | 10
-rw-r--r--  meta/classes/recipe_sanity.bbclass | 179
-rw-r--r--  meta/classes/relocatable.bbclass | 94
-rw-r--r--  meta/classes/rm_work.bbclass | 50
-rw-r--r--  meta/classes/rootfs_deb.bbclass | 192
-rw-r--r--  meta/classes/rootfs_ipk.bbclass | 187
-rw-r--r--  meta/classes/rootfs_rpm.bbclass | 331
-rw-r--r--  meta/classes/sanity.bbclass | 643
-rw-r--r--  meta/classes/scons.bbclass | 10
-rw-r--r--  meta/classes/sdk.bbclass | 106
-rw-r--r--  meta/classes/sdl.bbclass | 38
-rw-r--r--  meta/classes/setuptools.bbclass | 8
-rw-r--r--  meta/classes/singlemachine.bbclass | 15
-rw-r--r--  meta/classes/sip.bbclass | 6
-rw-r--r--  meta/classes/siteconfig.bbclass | 32
-rw-r--r--  meta/classes/siteinfo.bbclass | 221
-rw-r--r--  meta/classes/sourcepkg.bbclass | 111
-rw-r--r--  meta/classes/src_distribute.bbclass | 27
-rw-r--r--  meta/classes/src_distribute_local.bbclass | 31
-rw-r--r--  meta/classes/srec.bbclass | 28
-rw-r--r--  meta/classes/sstate.bbclass | 558
-rw-r--r--  meta/classes/staging.bbclass | 121
-rw-r--r--  meta/classes/syslinux.bbclass | 111
-rw-r--r--  meta/classes/task.bbclass | 4
-rw-r--r--  meta/classes/terminal.bbclass | 41
-rw-r--r--  meta/classes/tinderclient.bbclass | 14
-rw-r--r--  meta/classes/tmake.bbclass | 77
-rw-r--r--  meta/classes/toolchain-scripts.bbclass | 148
-rw-r--r--  meta/classes/typecheck.bbclass | 12
-rw-r--r--  meta/classes/update-alternatives.bbclass | 103
-rw-r--r--  meta/classes/update-rc.d.bbclass | 63
-rw-r--r--  meta/classes/useradd.bbclass | 214
-rw-r--r--  meta/classes/utility-tasks.bbclass | 65
-rw-r--r--  meta/classes/utils.bbclass | 385
-rw-r--r--  meta/classes/xfce.bbclass | 20
-rw-r--r--  meta/classes/xlibs.bbclass | 15
151 files changed, 13938 insertions(+), 5566 deletions(-)
diff --git a/meta/classes/allarch.bbclass b/meta/classes/allarch.bbclass
new file mode 100644
index 000000000..18c9ae28e
--- /dev/null
+++ b/meta/classes/allarch.bbclass
@@ -0,0 +1,22 @@
+#
+# This class is used for architecture-independent recipes/data files (usually scripts)
+#
+
+PACKAGE_ARCH = "all"
+
+# No need for virtual/libc or a cross compiler
+INHIBIT_DEFAULT_DEPS = "1"
+
+# Set these to a common set of values; we shouldn't be using them for anything other
+# than WORKDIR directory naming anyway
+TARGET_ARCH = "allarch"
+TARGET_OS = "linux"
+TARGET_CC_ARCH = "none"
+TARGET_LD_ARCH = "none"
+TARGET_AS_ARCH = "none"
+PACKAGE_EXTRA_ARCHS = ""
+
+# No need to do shared library processing or debug symbol handling
+EXCLUDE_FROM_SHLIBS = "1"
+INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
+INHIBIT_PACKAGE_STRIP = "1"
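
As a usage sketch, a recipe carrying architecture-independent content only needs to inherit this class; the recipe below is hypothetical and not part of this change:

# example-scripts_1.0.bb (hypothetical recipe, for illustration only)
SUMMARY = "Architecture-independent helper scripts"
LICENSE = "MIT"
# LIC_FILES_CHKSUM omitted for brevity
SRC_URI = "file://example.sh"

inherit allarch

do_install() {
	# the payload is identical for every target machine, so PACKAGE_ARCH
	# stays "all" and one package feeds all architectures
	install -d ${D}${bindir}
	install -m 0755 ${WORKDIR}/example.sh ${D}${bindir}/example.sh
}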
diff --git a/meta/classes/archive-configured-source.bbclass b/meta/classes/archive-configured-source.bbclass
new file mode 100644
index 000000000..1a609b36d
--- /dev/null
+++ b/meta/classes/archive-configured-source.bbclass
@@ -0,0 +1,14 @@
+# This class archives packages with configured sources (archiving ${S} after the configure stage), logs (archiving 'temp' after do_package_write_rpm) and dump data,
+# and creates a diff file (it dumps all build environment variables and functions, and maps all content in ${S}, including patches, to xxx.diff.gz).
+# All archived packages are deployed to ${DEPLOY_DIR}/sources
+
+inherit archiver
+
+# Archive the configured sources, including patches
+do_configure[postfuncs] += "do_archive_configured_sources "
+
+# Archive the logs ('temp') and scripts (.bb and .inc files)
+do_package_write_rpm[prefuncs] += "do_archive_scripts_logs "
+
+# Dump data and create the diff file
+do_package_write_rpm[postfuncs] += "do_dumpdata_create_diff_gz "
diff --git a/meta/classes/archive-original-source.bbclass b/meta/classes/archive-original-source.bbclass
new file mode 100644
index 000000000..b08553365
--- /dev/null
+++ b/meta/classes/archive-original-source.bbclass
@@ -0,0 +1,14 @@
+# This class archives packages with original sources (archiving ${S} after the unpack stage), patches, logs (archiving 'temp' after do_package_write_rpm) and dump data,
+# and creates a diff file (it dumps all build environment variables and functions, and maps all content in ${S}, including patches, to xxx.diff.gz).
+# All archived packages are deployed to ${DEPLOY_DIR}/sources
+
+inherit archiver
+
+# Archive the original sources, including patches
+do_unpack[postfuncs] += "do_archive_original_sources_patches "
+
+# Archive the logs ('temp') and scripts (.bb and .inc files)
+do_package_write_rpm[prefuncs] += "do_archive_scripts_logs "
+
+# Dump data and create the diff file
+do_package_write_rpm[postfuncs] += "do_dumpdata_create_diff_gz "
diff --git a/meta/classes/archive-patched-source.bbclass b/meta/classes/archive-patched-source.bbclass
new file mode 100644
index 000000000..a6d368f2c
--- /dev/null
+++ b/meta/classes/archive-patched-source.bbclass
@@ -0,0 +1,14 @@
+# This class archives packages with patched sources (archiving ${S} after the patch stage), logs (archiving 'temp' after do_package_write_rpm) and dump data,
+# and creates a diff file (it dumps all build environment variables and functions, and maps all content in ${S}, including patches, to xxx.diff.gz).
+# All archived packages are deployed to ${DEPLOY_DIR}/sources
+
+inherit archiver
+
+# Archive the patched sources, including patches
+do_patch[postfuncs] += "do_archive_patched_sources "
+
+# Archive the logs ('temp') and scripts (.bb and .inc files)
+do_package_write_rpm[prefuncs] += "do_archive_scripts_logs "
+
+# Dump data and create the diff file
+do_package_write_rpm[postfuncs] += "do_dumpdata_create_diff_gz "
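
The three wrapper classes above differ only in which task the source archive hooks onto. A minimal activation sketch, assuming the usual local.conf mechanism (these lines are illustrative, not part of this change):

# local.conf (sketch) -- enable exactly one of the wrapper classes
INHERIT += "archive-original-source"
# or: INHERIT += "archive-configured-source"
# or: INHERIT += "archive-patched-source"

Once inherited, the pre/post function hooks shown above run automatically during the normal unpack/patch/configure/package flow, and the results land in ${DEPLOY_DIR}/sources.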
diff --git a/meta/classes/archiver.bbclass b/meta/classes/archiver.bbclass
new file mode 100644
index 000000000..ac8aa957e
--- /dev/null
+++ b/meta/classes/archiver.bbclass
@@ -0,0 +1,452 @@
+# This class archives sources, patches and logs to tarballs. It also dumps the build
+# environment to ${P}-${PR}.showdata.dump and creates ${P}-${PR}.diff.gz to record
+# all content in ${S} in a diff file.
+
+ARCHIVE_EXCLUDE_FROM ?= ".pc autom4te.cache"
+ARCHIVE_TYPE ?= "TAR SRPM"
+DISTRO ?= "poky"
+PATCHES_ARCHIVE_WITH_SERIES = 'TRUE'
+SOURCE_ARCHIVE_LOG_WITH_SCRIPTS ?= 'logs_with_scripts'
+SOURCE_ARCHIVE_PACKAGE_TYPE ?= 'tar'
+
+def get_bb_inc(d):
+ '''create a directory "script-logs" in ${WORKDIR} containing the .bb and .inc files used by the recipe'''
+ import re
+ import os
+ import shutil
+
+ bbinc = []
+ pat=re.compile('require\s*([^\s]*\.*)(.*)')
+ work_dir = d.getVar('WORKDIR', True)
+ bbfile = d.getVar('FILE', True)
+ bbdir = os.path.dirname(bbfile)
+ script_logs = os.path.join(work_dir,'script-logs')
+ bb_inc = os.path.join(script_logs,'bb_inc')
+ bb.mkdirhier(script_logs)
+ bb.mkdirhier(bb_inc)
+
+ def find_file(dir,file):
+ for root, dirs, files in os.walk(dir):
+ if file in files:
+ return os.path.join(root,file)
+
+ def get_inc (file):
+ f = open(file,'r')
+ for line in f.readlines():
+ if 'require' not in line:
+ bbinc.append(file)
+ else:
+ try:
+ incfile = pat.match(line).group(1)
+ incfile = bb.data.expand(os.path.basename(incfile),d)
+ abs_incfile = find_file(bbdir,incfile)
+ if abs_incfile:
+ bbinc.append(abs_incfile)
+ get_inc(abs_incfile)
+ except AttributeError:
+ pass
+ get_inc(bbfile)
+ bbinc = list(set(bbinc))
+ for bbincfile in bbinc:
+ shutil.copy(bbincfile,bb_inc)
+
+ try:
+ bb.mkdirhier(os.path.join(script_logs,'temp'))
+ oe.path.copytree(os.path.join(work_dir,'temp'), os.path.join(script_logs,'temp'))
+ except (IOError,AttributeError):
+ pass
+ return script_logs
+
+def get_series(d):
+ '''copy patches and the series file to a dedicated directory in ${WORKDIR}, which will be archived into a tarball'''
+ import shutil
+
+ src_patches=[]
+ pf = d.getVar('PF', True)
+ work_dir = d.getVar('WORKDIR', True)
+ s = d.getVar('S',True)
+ dest = os.path.join(work_dir, pf + '-series')
+ shutil.rmtree(dest, ignore_errors=True)
+ bb.mkdirhier(dest)
+
+ src_uri = d.getVar('SRC_URI', True).split()
+ fetch = bb.fetch2.Fetch(src_uri, d)
+ locals = (fetch.localpath(url) for url in fetch.urls)
+ for local in locals:
+ src_patches.append(local)
+ if not cmp(work_dir,s):
+ tmp_list = src_patches
+ else:
+ tmp_list = src_patches[1:]
+
+ for patch in tmp_list:
+ try:
+ shutil.copy(patch,dest)
+ except IOError:
+ if os.path.isdir(patch):
+ bb.mkdirhier(os.path.join(dest,patch))
+ oe.path.copytree(patch, os.path.join(dest,patch))
+ return dest
+
+def get_applying_patches(d):
+ """only copy applying patches to a pointed directory which will be archived to tarball"""
+ import os
+ import shutil
+
+
+ pf = d.getVar('PF', True)
+ work_dir = d.getVar('WORKDIR', True)
+ dest = os.path.join(work_dir, pf + '-patches')
+ shutil.rmtree(dest, ignore_errors=True)
+ bb.mkdirhier(dest)
+
+
+ patches = src_patches(d)
+ for patch in patches:
+ _, _, local, _, _, parm = bb.decodeurl(patch)
+ if local:
+ shutil.copy(local,dest)
+ return dest
+
+def not_tarball(d):
+ '''skip packages whose paths contain the keywords 'work-shared', 'native' or 'task-' '''
+ import os
+
+ workdir = d.getVar('WORKDIR',True)
+ s = d.getVar('S',True)
+ if 'work-shared' in s or 'task-' in workdir or 'native' in workdir:
+ return True
+ else:
+ return False
+
+def get_source_from_downloads(d,stage_name):
+ '''copy tarball of $P to $WORKDIR when this tarball exists in $DL_DIR'''
+ if stage_name in ('patched', 'configured'):
+ return
+ pf = d.getVar('PF', True)
+ dl_dir = d.getVar('DL_DIR',True)
+ try:
+ source = os.path.join(dl_dir,os.path.basename(d.getVar('SRC_URI', True).split()[0]))
+ if os.path.exists(source) and not os.path.isdir(source):
+ return source
+ except (IndexError, OSError):
+ pass
+ return ''
+
+def do_tarball(workdir,srcdir,tarname):
+ '''tar "srcdir" under "workdir" to "tarname"'''
+ import tarfile
+
+ sav_dir = os.getcwd()
+ os.chdir(workdir)
+ if (len(os.listdir(srcdir))) != 0:
+ tar = tarfile.open(tarname, "w:gz")
+ tar.add(srcdir)
+ tar.close()
+ else:
+ tarname = ''
+ os.chdir(sav_dir)
+ return tarname
+
+def archive_sources_from_directory(d,stage_name):
+ '''archive the source tree to a tarball when no tarball of $P exists in $DL_DIR'''
+ import shutil
+
+ s = d.getVar('S',True)
+ work_dir=d.getVar('WORKDIR', True)
+ PF = d.getVar('PF',True)
+ tarname = PF + '-' + stage_name + ".tar.gz"
+
+ if os.path.exists(s) and work_dir in s:
+ try:
+ source_dir = os.path.join(work_dir,[ i for i in s.replace(work_dir,'').split('/') if i][0])
+ except IndexError:
+ if not cmp(s,work_dir):
+ return ''
+ else:
+ return ''
+ source = os.path.basename(source_dir)
+ return do_tarball(work_dir,source,tarname)
+
+def archive_sources(d,stage_name):
+ '''copy the tarball from $DL_DIR to $WORKDIR if one exists; otherwise archive the source tree in $WORKDIR'''
+ import shutil
+ work_dir = d.getVar('WORKDIR',True)
+ file = get_source_from_downloads(d,stage_name)
+ if file:
+ shutil.copy(file,work_dir)
+ file = os.path.basename(file)
+ else:
+ file = archive_sources_from_directory(d,stage_name)
+ return file
+
+
+def archive_patches(d,patchdir,series):
+ '''archive patches to tarball and also include series files if 'series' is True'''
+ import shutil
+
+ s = d.getVar('S',True)
+ work_dir = d.getVar('WORKDIR', True)
+ patch_dir = os.path.basename(patchdir)
+ tarname = patch_dir + ".tar.gz"
+ if series == 'all' and os.path.exists(os.path.join(s,'patches/series')):
+ shutil.copy(os.path.join(s,'patches/series'),patchdir)
+ tarname = do_tarball(work_dir,patch_dir,tarname)
+ shutil.rmtree(patchdir, ignore_errors=True)
+ return tarname
+
+def select_archive_patches(d,option):
+ '''select whether to archive all patches (including non-applied ones and the series file) or only the applied patches'''
+ if option == "all":
+ patchdir = get_series(d)
+ elif option == "applying":
+ patchdir = get_applying_patches(d)
+ try:
+ os.rmdir(patchdir)
+ except OSError:
+ tarpatch = archive_patches(d,patchdir,option)
+ return tarpatch
+ return
+
+def archive_logs(d,logdir,bbinc=False):
+ '''archive the logs in 'temp' to a tarball, plus the .bb and .inc files if bbinc is True'''
+ import shutil
+
+ pf = d.getVar('PF',True)
+ work_dir = d.getVar('WORKDIR',True)
+ log_dir = os.path.basename(logdir)
+ tarname = pf + '-' + log_dir + ".tar.gz"
+ tarname = do_tarball(work_dir,log_dir,tarname)
+ if bbinc:
+ shutil.rmtree(logdir, ignore_errors=True)
+ return tarname
+
+def get_licenses(d):
+ '''get the licenses for the .bb file being built'''
+ licenses = d.getVar('LICENSE', 1).replace('&', '|')
+ licenses = licenses.replace('(', '').replace(')', '')
+ clean_licenses = ""
+ for x in licenses.split():
+ if x.strip() == '' or x == 'CLOSED':
+ continue
+ if x != "|":
+ clean_licenses += x
+ if '|' in clean_licenses:
+ clean_licenses = clean_licenses.replace('|','')
+ return clean_licenses
+
+
+def move_tarball_deploy(d,tarball_list):
+ '''move tarballs to ${DEPLOY_DIR}/sources'''
+ import shutil
+
+ if not tarball_list:
+ return
+ target_sys = d.getVar('TARGET_SYS', True)
+ pf = d.getVar('PF', True)
+ licenses = get_licenses(d)
+ work_dir = d.getVar('WORKDIR',True)
+ tar_sources = d.getVar('DEPLOY_DIR', True) + '/sources/' + target_sys + '/' + licenses + '/' + pf
+ if not os.path.exists(tar_sources):
+ bb.mkdirhier(tar_sources)
+ for source in tarball_list:
+ if source:
+ if os.path.exists(os.path.join(tar_sources, source)):
+ os.remove(os.path.join(tar_sources,source))
+ shutil.move(os.path.join(work_dir,source),tar_sources)
+
+def check_archiving_type(d):
+ '''check the archiving package type ('tar' or 'srpm')'''
+ try:
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() not in d.getVar('ARCHIVE_TYPE', True).split():
+ raise AttributeError
+ except AttributeError:
+ bb.fatal("\"SOURCE_ARCHIVE_PACKAGE_TYPE\" is \'tar\' or \'srpm\', no other types")
+
+def store_package(d,package_name):
+ '''store tarball names in the file "tar-package"'''
+ try:
+ f = open(os.path.join(d.getVar('WORKDIR',True),'tar-package'),'a')
+ f.write(package_name + ' ')
+ f.close()
+ except IOError:
+ pass
+
+def get_package(d):
+ '''get the tarball names from "tar-package"'''
+ work_dir = (d.getVar('WORKDIR', True))
+ tarpackage = os.path.join(work_dir,'tar-package')
+ try:
+ f = open(tarpackage,'r')
+ line = list(set(f.readline().replace('\n','').split()))
+ except (UnboundLocalError, IOError):
+ pass
+ f.close()
+ return line
+
+
+def archive_sources_patches(d,stage_name):
+ '''archive sources and patches to a tarball. stage_name is inserted into the tarball name after ${PF}, e.g. zlib-1.4.6-prepatch.tar.gz for the "prepatch" stage'''
+ import shutil
+
+ check_archiving_type(d)
+ if not_tarball(d):
+ return
+
+ source_tar_name = archive_sources(d,stage_name)
+ if stage_name == "prepatch":
+ if d.getVar('PATCHES_ARCHIVE_WITH_SERIES',True).upper() == 'TRUE':
+ patch_tar_name = select_archive_patches(d,"all")
+ elif d.getVar('PATCHES_ARCHIVE_WITH_SERIES',True).upper() == 'FALSE':
+ patch_tar_name = select_archive_patches(d,"applying")
+ else:
+ bb.fatal("Please define 'PATCHES_ARCHIVE_WITH_SERIES' is strings 'True' or 'False' ")
+ else:
+ patch_tar_name = ''
+
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() not in 'SRPM':
+ move_tarball_deploy(d,[source_tar_name,patch_tar_name])
+ else:
+ tarpackage = os.path.join(d.getVar('WORKDIR', True),'tar-package')
+ if os.path.exists(tarpackage):
+ os.remove(tarpackage)
+ for package in os.path.basename(source_tar_name), patch_tar_name:
+ if package:
+ store_package(d,str(package) + ' ')
+
+def archive_scripts_logs(d):
+ '''archive scripts and logs. Scripts are the .bb and .inc files; logs are the contents of "temp".'''
+
+ work_dir = d.getVar('WORKDIR', True)
+ temp_dir = os.path.join(work_dir,'temp')
+ source_archive_log_with_scripts = d.getVar('SOURCE_ARCHIVE_LOG_WITH_SCRIPTS', True)
+ if source_archive_log_with_scripts == 'logs_with_scripts':
+ logdir = get_bb_inc(d)
+ tarlog = archive_logs(d,logdir,True)
+ elif source_archive_log_with_scripts == 'logs':
+ if os.path.exists(temp_dir):
+ tarlog = archive_logs(d,temp_dir,False)
+ else:
+ return
+
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() not in 'SRPM':
+ move_tarball_deploy(d,[tarlog])
+
+ else:
+ store_package(d,tarlog)
+
+def dumpdata(d):
+ '''dump the environment, including all variables and functions, to "${P}-${PR}.showdata.dump" when running a task'''
+ workdir = bb.data.getVar('WORKDIR', d, 1)
+ distro = bb.data.getVar('DISTRO', d, 1)
+ s = d.getVar('S', True)
+ pf = d.getVar('PF', True)
+ target_sys = d.getVar('TARGET_SYS', True)
+ licenses = get_licenses(d)
+ dumpdir = d.getVar('DEPLOY_DIR', True) + '/sources/' + target_sys + '/' + licenses + '/' + pf
+ if not os.path.exists(dumpdir):
+ bb.mkdirhier(dumpdir)
+
+ dumpfile = os.path.join(dumpdir, bb.data.expand("${P}-${PR}.showdata.dump",d))
+
+ bb.note("Dumping metadata into '%s'" % dumpfile)
+ f = open(dumpfile, "w")
+ # emit variables and shell functions
+ bb.data.emit_env(f, d, True)
+ # emit the metadata which isn't valid shell
+ for e in d.keys():
+ if bb.data.getVarFlag(e, 'python', d):
+ f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
+ f.close()
+
+def create_diff_gz(d):
+ '''create .diff.gz in ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz, mapping all content in ${S}, including patches, to the diff file'''
+ import shutil
+
+ work_dir = d.getVar('WORKDIR', True)
+ exclude_from = d.getVar('ARCHIVE_EXCLUDE_FROM', True).split()
+ pf = d.getVar('PF', True)
+ licenses = get_licenses(d)
+ target_sys = d.getVar('TARGET_SYS', True)
+ diff_dir = d.getVar('DEPLOY_DIR', True) + '/sources/' + target_sys + '/' + licenses + '/' + pf
+ diff_file = os.path.join(diff_dir, bb.data.expand("${P}-${PR}.diff.gz",d))
+
+ f = open(os.path.join(work_dir,'temp/exclude-from-file'), 'a')
+ for i in exclude_from:
+ f.write(i)
+ f.write("\n")
+ f.close()
+
+ s=d.getVar('S', True)
+ distro = d.getVar('DISTRO',True)
+ dest = s + '/' + distro + '/files'
+ if not os.path.exists(dest):
+ bb.mkdirhier(dest)
+ for i in os.listdir(os.getcwd()):
+ if os.path.isfile(i):
+ try:
+ shutil.copy(i, dest)
+ except IOError:
+ os.system('fakeroot cp -rf ' + i + " " + dest )
+
+ bb.note("Creating .diff.gz in ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz")
+ cmd = "LC_ALL=C TZ=UTC0 diff --exclude-from=" + work_dir + "/temp/exclude-from-file -Naur " + s + '.org' + ' ' + s + " | gzip -c > " + diff_file
+ d.setVar('DIFF', cmd + "\n")
+ d.setVarFlag('DIFF', 'func', '1')
+ bb.build.exec_func('DIFF', d)
+ shutil.rmtree(s + '.org', ignore_errors=True)
+
+# This function runs when the user wants a tarball of the sources and patches after do_unpack
+python do_archive_original_sources_patches(){
+ archive_sources_patches(d,'prepatch')
+}
+
+# This function runs when the user wants a tarball of the patched sources after do_patch
+python do_archive_patched_sources(){
+ archive_sources_patches(d,'patched')
+}
+
+# This function runs when the user wants a tarball of the configured sources after do_configure
+python do_archive_configured_sources(){
+ archive_sources_patches(d,'configured')
+}
+
+# This function runs when the user wants a tarball of the logs, or of both logs and scripts (.bb and .inc files)
+python do_archive_scripts_logs(){
+ archive_scripts_logs(d)
+}
+
+# This function runs when the user wants to know which variables and functions are set in a
+# running task, and also produces a diff file covering all content a package should include.
+python do_dumpdata_create_diff_gz(){
+ dumpdata(d)
+ create_diff_gz(d)
+}
+
+# This function prepares for archiving "linux-yocto", because that package creates the 's'
+# directory before do_patch instead of after do_unpack. This is special handling for archiving linux-yocto only.
+python do_archive_linux_yocto(){
+ s = d.getVar('S', True)
+ if 'linux-yocto' in s:
+ source_tar_name = archive_sources(d,'')
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() not in 'SRPM':
+ move_tarball_deploy(d,[source_tar_name,''])
+}
+do_kernel_checkout[postfuncs] += "do_archive_linux_yocto "
+
+# remove the tarballs for sources, patches and logs after creating the srpm.
+python do_remove_tarball(){
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ work_dir = d.getVar('WORKDIR', True)
+ try:
+ for file in os.listdir(os.getcwd()):
+ if file in get_package(d):
+ os.remove(file)
+ os.remove(os.path.join(work_dir,'tar-package'))
+ except (TypeError,OSError):
+ pass
+}
+do_remove_tarball[deptask] = "do_archive_scripts_logs"
+do_package_write_rpm[postfuncs] += "do_remove_tarball "
+export get_licenses
+export get_package
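
The defaults near the top of the class are intended to be overridden from configuration. A sketch of the tunables defined above (the values shown are illustrative choices, not new defaults):

# local.conf (sketch)
SOURCE_ARCHIVE_PACKAGE_TYPE = "srpm"       # must be one of ARCHIVE_TYPE: "tar" or "srpm"
PATCHES_ARCHIVE_WITH_SERIES = "FALSE"      # archive only the applied patches, no series file
SOURCE_ARCHIVE_LOG_WITH_SCRIPTS = "logs"   # archive logs alone, without the .bb/.inc files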
diff --git a/meta/classes/autotools.bbclass b/meta/classes/autotools.bbclass
index 365258f65..941c06d03 100644
--- a/meta/classes/autotools.bbclass
+++ b/meta/classes/autotools.bbclass
@@ -1,72 +1,87 @@
-inherit base
-
-# use autotools_stage_all for native packages
-AUTOTOOLS_NATIVE_STAGE_INSTALL = "1"
-
def autotools_dep_prepend(d):
- import bb;
-
- if bb.data.getVar('INHIBIT_AUTOTOOLS_DEPS', d, 1):
+ if d.getVar('INHIBIT_AUTOTOOLS_DEPS', True):
return ''
- pn = bb.data.getVar('PN', d, 1)
+ pn = d.getVar('PN', True)
deps = ''
- if pn in ['autoconf-native', 'automake-native']:
+ if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
return deps
deps += 'autoconf-native automake-native '
- if not pn in ['libtool', 'libtool-native', 'libtool-cross']:
+ if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
deps += 'libtool-native '
if not bb.data.inherits_class('native', d) \
+ and not bb.data.inherits_class('nativesdk', d) \
and not bb.data.inherits_class('cross', d) \
- and not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d, 1):
+ and not d.getVar('INHIBIT_DEFAULT_DEPS', True):
deps += 'libtool-cross '
return deps + 'gnu-config-native '
EXTRA_OEMAKE = ""
+
DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
+
+inherit siteinfo
+
+# Space-separated list of shell scripts with variables defined to supply test
+# results for autoconf tests we cannot run at build time.
+export CONFIG_SITE = "${@siteinfo_get_files(d)}"
+
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
+export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
+
def autotools_set_crosscompiling(d):
- import bb
if not bb.data.inherits_class('native', d):
return " cross_compiling=yes"
return ""
+def append_libtool_sysroot(d):
+ # Only supply libtool sysroot option for non-native packages
+ if not bb.data.inherits_class('native', d):
+ return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
+ return ""
+
# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
+CONFIGUREOPTS = " --build=${BUILD_SYS} \
+ --host=${HOST_SYS} \
+ --target=${TARGET_SYS} \
+ --prefix=${prefix} \
+ --exec_prefix=${exec_prefix} \
+ --bindir=${bindir} \
+ --sbindir=${sbindir} \
+ --libexecdir=${libexecdir} \
+ --datadir=${datadir} \
+ --sysconfdir=${sysconfdir} \
+ --sharedstatedir=${sharedstatedir} \
+ --localstatedir=${localstatedir} \
+ --libdir=${libdir} \
+ --includedir=${includedir} \
+ --oldincludedir=${oldincludedir} \
+ --infodir=${infodir} \
+ --mandir=${mandir} \
+ --disable-silent-rules \
+ ${CONFIGUREOPT_DEPTRACK} \
+ ${@append_libtool_sysroot(d)}"
+CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"
+
+
oe_runconf () {
- if [ -x ${S}/configure ] ; then
- cfgcmd="${S}/configure \
- --build=${BUILD_SYS} \
- --host=${HOST_SYS} \
- --target=${TARGET_SYS} \
- --prefix=${prefix} \
- --exec_prefix=${exec_prefix} \
- --bindir=${bindir} \
- --sbindir=${sbindir} \
- --libexecdir=${libexecdir} \
- --datadir=${datadir} \
- --sysconfdir=${sysconfdir} \
- --sharedstatedir=${sharedstatedir} \
- --localstatedir=${localstatedir} \
- --libdir=${libdir} \
- --includedir=${includedir} \
- --oldincludedir=${oldincludedir} \
- --infodir=${infodir} \
- --mandir=${mandir} \
- ${EXTRA_OECONF} \
- $@"
- oenote "Running $cfgcmd..."
- $cfgcmd || oefatal "oe_runconf failed"
+ cfgscript="${S}/configure"
+ if [ -x "$cfgscript" ] ; then
+ bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
+ ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@" || bbfatal "oe_runconf failed"
else
- oefatal "no configure script found"
+ bbfatal "no configure script found at $cfgscript"
fi
}
+AUTOTOOLS_AUXDIR ?= "${S}"
+
autotools_do_configure() {
case ${PN} in
autoconf*)
@@ -88,6 +103,8 @@ autotools_do_configure() {
if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
olddir=`pwd`
cd ${S}
+ # Remove any previous copy of the m4 macros
+ rm -rf ${B}/aclocal-copy/
if [ x"${acpaths}" = xdefault ]; then
acpaths=
for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
@@ -100,9 +117,19 @@ autotools_do_configure() {
AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
automake --version
echo "AUTOV is $AUTOV"
- install -d ${STAGING_DATADIR}/aclocal
- install -d ${STAGING_DATADIR}/aclocal-$AUTOV
- acpaths="$acpaths -I${STAGING_DATADIR}/aclocal-$AUTOV -I ${STAGING_DATADIR}/aclocal"
+ if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
+ acpaths="$acpaths -I${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
+ fi
+ # The aclocal directory could get modified by other processes
+ # uninstalling data from the sysroot. See Yocto #861 for details.
+ # We avoid this by taking a copy here and then files cannot disappear.
+ if [ -d ${STAGING_DATADIR}/aclocal ]; then
+ mkdir -p ${B}/aclocal-copy/
+ # for a scratch build this directory can be empty,
+ # so avoid cp's "no files to copy" error
+ cp -r ${STAGING_DATADIR}/aclocal/. ${B}/aclocal-copy/
+ acpaths="$acpaths -I ${B}/aclocal-copy/"
+ fi
# autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
# like it was auto-generated. Work around this by blowing it away
# by hand, unless the package specifically asked not to run aclocal.
@@ -114,21 +141,30 @@ autotools_do_configure() {
else
CONFIGURE_AC=configure.ac
fi
- if grep "^AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
- if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
- : do nothing -- we still have an old unmodified configure.ac
- else
- oenote Executing glib-gettextize --force --copy
- echo "no" | glib-gettextize --force --copy
+ if ! echo ${EXTRA_OECONF} | grep -q "\-\-disable-nls"; then
+ if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
+ : do nothing -- we still have an old unmodified configure.ac
+ else
+ bbnote Executing glib-gettextize --force --copy
+ echo "no" | glib-gettextize --force --copy
+ fi
+ else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
+ # We'd call gettextize here if it wasn't so broken...
+ cp ${STAGING_DATADIR}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
+ if [ -d ${S}/po/ -a ! -e ${S}/po/Makefile.in.in ]; then
+ cp ${STAGING_DATADIR}/gettext/po/Makefile.in.in ${S}/po/
+ fi
fi
fi
- if grep "^[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
- oenote Executing intltoolize --copy --force --automake
- intltoolize --copy --force --automake
fi
- oenote Executing autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
mkdir -p m4
- autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || oefatal "autoreconf execution failed."
+ if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
+ bbnote Executing intltoolize --copy --force --automake
+ intltoolize --copy --force --automake
+ fi
+ bbnote Executing autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+ autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
cd $olddir
fi
;;
@@ -136,93 +172,18 @@ autotools_do_configure() {
if [ -e ${S}/configure ]; then
oe_runconf
else
- oenote "nothing to configure"
+ bbnote "nothing to configure"
fi
}
autotools_do_install() {
oe_runmake 'DESTDIR=${D}' install
-
- for i in `find ${D} -name "*.la"` ; do \
- sed -i -e s:${STAGING_LIBDIR}:${libdir}:g $i
- sed -i -e s:${D}::g $i
- sed -i -e 's:-I${WORKDIR}\S*: :g' $i
- sed -i -e 's:-L${WORKDIR}\S*: :g' $i
- done
-}
-
-STAGE_TEMP="${WORKDIR}/temp-staging"
-
-autotools_stage_includes() {
- if [ "${INHIBIT_AUTO_STAGE_INCLUDES}" != "1" ]
- then
- rm -rf ${STAGE_TEMP}
- mkdir -p ${STAGE_TEMP}
- make DESTDIR="${STAGE_TEMP}" install
- cp -pPR ${STAGE_TEMP}/${includedir}/* ${STAGING_INCDIR}
- rm -rf ${STAGE_TEMP}
+ # Info dir listing isn't interesting at this point so remove it if it exists.
+ if [ -e "${D}${infodir}/dir" ]; then
+ rm -f ${D}${infodir}/dir
fi
}
-autotools_stage_dir() {
- from="$1"
- to="$2"
- # This will remove empty directories so we can ignore them
- rmdir "$from" 2> /dev/null || true
- if [ -d "$from" ]; then
- mkdir -p "$to"
- cp -fpPR "$from"/* "$to"
- fi
-}
-
-autotools_stage_libdir() {
- from="$1"
- to="$2"
-
- olddir=`pwd`
- cd $from
- las=$(find . -name \*.la -type f)
- cd $olddir
- echo "Found la files: $las"
- for i in $las
- do
- sed -e 's/^installed=yes$/installed=no/' \
- -e '/^dependency_libs=/s,${WORKDIR}[[:alnum:]/\._+-]*/\([[:alnum:]\._+-]*\),${STAGING_LIBDIR}/\1,g' \
- -e "/^dependency_libs=/s,\([[:space:]']\)${libdir},\1${STAGING_LIBDIR},g" \
- -i $from/$i
- done
- autotools_stage_dir $from $to
-}
-
-
-autotools_stage_all() {
- if [ "${INHIBIT_AUTO_STAGE}" = "1" ]
- then
- return
- fi
- rm -rf ${STAGE_TEMP}
- mkdir -p ${STAGE_TEMP}
- oe_runmake DESTDIR="${STAGE_TEMP}" install
- autotools_stage_dir ${STAGE_TEMP}/${includedir} ${STAGING_INCDIR}
- if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
- autotools_stage_dir ${STAGE_TEMP}/${bindir} ${STAGING_DIR_HOST}${layout_bindir}
- autotools_stage_dir ${STAGE_TEMP}/${sbindir} ${STAGING_DIR_HOST}${layout_sbindir}
- autotools_stage_dir ${STAGE_TEMP}/${base_bindir} ${STAGING_DIR_HOST}${layout_base_bindir}
- autotools_stage_dir ${STAGE_TEMP}/${base_sbindir} ${STAGING_DIR_HOST}${layout_base_sbindir}
- autotools_stage_dir ${STAGE_TEMP}/${libexecdir} ${STAGING_DIR_HOST}${layout_libexecdir}
- fi
- if [ -d ${STAGE_TEMP}/${libdir} ]
- then
- autotools_stage_libdir ${STAGE_TEMP}/${libdir} ${STAGING_LIBDIR}
- fi
- if [ -d ${STAGE_TEMP}/${base_libdir} ]
- then
- autotools_stage_libdir ${STAGE_TEMP}/${base_libdir} ${STAGING_DIR_HOST}${layout_base_libdir}
- fi
- rm -rf ${STAGE_TEMP}/${mandir} || true
- rm -rf ${STAGE_TEMP}/${infodir} || true
- autotools_stage_dir ${STAGE_TEMP}/${datadir} ${STAGING_DATADIR}
- #rm -rf ${STAGE_TEMP}
-}
+inherit siteconfig
EXPORT_FUNCTIONS do_configure do_install
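
With the configure arguments centralised in CONFIGUREOPTS, a recipe normally supplies only its package-specific switches. A hypothetical recipe fragment showing how the pieces above combine (illustrative only, not part of this change):

# hypothetical recipe fragment, for illustration
inherit autotools

# appended after CONFIGUREOPTS by oe_runconf:
EXTRA_OECONF = "--disable-static --without-x"

# seed configure's cache for checks that cannot run when cross-compiling;
# ac_cv_func_mmap_fixed_mapped is a standard autoconf cache variable
CACHED_CONFIGUREVARS = "ac_cv_func_mmap_fixed_mapped=yes"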
diff --git a/meta/classes/autotools_stage.bbclass b/meta/classes/autotools_stage.bbclass
index 010117c25..b3c41e4b4 100644
--- a/meta/classes/autotools_stage.bbclass
+++ b/meta/classes/autotools_stage.bbclass
@@ -1,5 +1,2 @@
inherit autotools
-do_stage () {
- autotools_stage_all
-} \ No newline at end of file
diff --git a/meta/classes/base.bbclass b/meta/classes/base.bbclass
index 3704cce01..bb39b7b0b 100644
--- a/meta/classes/base.bbclass
+++ b/meta/classes/base.bbclass
@@ -1,91 +1,55 @@
BB_DEFAULT_TASK ?= "build"
+CLASSOVERRIDE ?= "class-target"
-# like os.path.join but doesn't treat absolute RHS specially
-def base_path_join(a, *p):
- path = a
- for b in p:
- if path == '' or path.endswith('/'):
- path += b
- else:
- path += '/' + b
- return path
+inherit patch
+inherit staging
-# for MD5/SHA handling
-def base_chk_load_parser(config_path):
- import ConfigParser, os, bb
- parser = ConfigParser.ConfigParser()
- if not len(parser.read(config_path)) == 1:
- bb.note("Can not open the '%s' ini file" % config_path)
- raise Exception("Can not open the '%s'" % config_path)
+inherit mirrors
+inherit utils
+inherit utility-tasks
+inherit metadata_scm
+inherit logging
- return parser
+OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.packagegroup oe.sstatesig"
+OE_IMPORTS[type] = "list"
-def base_chk_file(parser, pn, pv, src_uri, localpath, data):
- import os, bb
- no_checksum = False
- # Try PN-PV-SRC_URI first and then try PN-SRC_URI
- # we rely on the get method to create errors
- pn_pv_src = "%s-%s-%s" % (pn,pv,src_uri)
- pn_src = "%s-%s" % (pn,src_uri)
- if parser.has_section(pn_pv_src):
- md5 = parser.get(pn_pv_src, "md5")
- sha256 = parser.get(pn_pv_src, "sha256")
- elif parser.has_section(pn_src):
- md5 = parser.get(pn_src, "md5")
- sha256 = parser.get(pn_src, "sha256")
- elif parser.has_section(src_uri):
- md5 = parser.get(src_uri, "md5")
- sha256 = parser.get(src_uri, "sha256")
- else:
- no_checksum = True
+def oe_import(d):
+ import os, sys
- # md5 and sha256 should be valid now
- if not os.path.exists(localpath):
- bb.note("The localpath does not exist '%s'" % localpath)
- raise Exception("The path does not exist '%s'" % localpath)
+ bbpath = d.getVar("BBPATH", True).split(":")
+ sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
+ def inject(name, value):
+ """Make a python object accessible from the metadata"""
+ if hasattr(bb.utils, "_context"):
+ bb.utils._context[name] = value
+ else:
+ __builtins__[name] = value
- # Calculate the MD5 and 256-bit SHA checksums
- md5data = bb.utils.md5_file(localpath)
- shadata = bb.utils.sha256_file(localpath)
+ import oe.data
+ for toimport in oe.data.typed_value("OE_IMPORTS", d):
+ imported = __import__(toimport)
+ inject(toimport.split(".", 1)[0], imported)
- # sha256_file() can return None if we are running on Python 2.4 (hashlib is
- # 2.5 onwards, sha in 2.4 is 160-bit only), so check for this and call the
- # standalone shasum binary if required.
- if shadata is None:
- try:
- shapipe = os.popen('PATH=%s oe_sha256sum %s' % (bb.data.getVar('PATH', data, True), localpath))
- shadata = (shapipe.readline().split() or [ "" ])[0]
- shapipe.close()
- except OSError:
- raise Exception("Executing shasum failed, please build shasum-native")
-
- if no_checksum == True: # we do not have conf/checksums.ini entry
- try:
- file = open("%s/checksums.ini" % bb.data.getVar("TMPDIR", data, 1), "a")
- except:
- return False
-
- if not file:
- raise Exception("Creating checksums.ini failed")
-
- file.write("[%s]\nmd5=%s\nsha256=%s\n\n" % (src_uri, md5data, shadata))
- file.close()
- return False
+python oe_import_eh () {
+ if isinstance(e, bb.event.ConfigParsed):
+ oe_import(e.data)
+}
- if not md5 == md5data:
- bb.note("The MD5Sums did not match. Wanted: '%s' and Got: '%s'" % (md5,md5data))
- raise Exception("MD5 Sums do not match. Wanted: '%s' Got: '%s'" % (md5, md5data))
+addhandler oe_import_eh
- if not sha256 == shadata:
- bb.note("The SHA256 Sums do not match. Wanted: '%s' Got: '%s'" % (sha256,shadata))
- raise Exception("SHA256 Sums do not match. Wanted: '%s' Got: '%s'" % (sha256, shadata))
+die() {
+ bbfatal "$*"
+}
- return True
+oe_runmake() {
+ if [ x"$MAKE" = x ]; then MAKE=make; fi
+ bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
+ ${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
+}
def base_dep_prepend(d):
- import bb
#
# Ideally this will check a flag so we will operate properly in
# the case where host == build == target, for now we don't work in
@@ -93,724 +57,73 @@ def base_dep_prepend(d):
#
deps = ""
-
- # bb.utils.sha256_file() will return None on Python 2.4 because hashlib
- # isn't present. In this case we use a shasum-native to checksum, so if
- # hashlib isn't present then add shasum-native to the dependencies.
- try:
- import hashlib
- except ImportError:
- # Adding shasum-native as a dependency of shasum-native would be
- # stupid, so don't do that.
- if bb.data.getVar('PN', d, True) != "shasum-native":
- deps = "shasum-native "
-
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
- if not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d):
- if (bb.data.getVar('HOST_SYS', d, 1) !=
- bb.data.getVar('BUILD_SYS', d, 1)):
- deps += " virtual/${TARGET_PREFIX}gcc virtual/libc "
+ if not d.getVar('INHIBIT_DEFAULT_DEPS'):
+ if (d.getVar('HOST_SYS', True) !=
+ d.getVar('BUILD_SYS', True)):
+ deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
return deps
-def base_read_file(filename):
- import bb
- try:
- f = file( filename, "r" )
- except IOError, reason:
- return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
- else:
- return f.read().strip()
- return None
+BASEDEPENDS = "${@base_dep_prepend(d)}"
-def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
- import bb
- if bb.data.getVar(variable,d,1) == checkvalue:
- return truevalue
- else:
- return falsevalue
-
-def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- import bb
- if float(bb.data.getVar(variable,d,1)) <= float(checkvalue):
- return truevalue
- else:
- return falsevalue
-
-def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
- import bb
- result = bb.vercmp(bb.data.getVar(variable,d,True), checkvalue)
- if result <= 0:
- return truevalue
- else:
- return falsevalue
-
-def base_contains(variable, checkvalues, truevalue, falsevalue, d):
- import bb
- matches = 0
- if type(checkvalues).__name__ == "str":
- checkvalues = [checkvalues]
- for value in checkvalues:
- if bb.data.getVar(variable,d,1).find(value) != -1:
- matches = matches + 1
- if matches == len(checkvalues):
- return truevalue
- return falsevalue
-
-def base_both_contain(variable1, variable2, checkvalue, d):
- import bb
- if bb.data.getVar(variable1,d,1).find(checkvalue) != -1 and bb.data.getVar(variable2,d,1).find(checkvalue) != -1:
- return checkvalue
- else:
- return ""
-
-DEPENDS_prepend="${@base_dep_prepend(d)} "
-
-def base_prune_suffix(var, suffixes, d):
- # See if var ends with any of the suffixes listed and
- # remove it if found
- for suffix in suffixes:
- if var.endswith(suffix):
- return var.replace(suffix, "")
- return var
-
-def base_set_filespath(path, d):
- import os, bb
- filespath = []
- # The ":" ensures we have an 'empty' override
- overrides = (bb.data.getVar("OVERRIDES", d, 1) or "") + ":"
- for p in path:
- for o in overrides.split(":"):
- filespath.append(os.path.join(p, o))
- return ":".join(filespath)
+DEPENDS_prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath([ "${FILE_DIRNAME}/${PF}", "${FILE_DIRNAME}/${P}", "${FILE_DIRNAME}/${PN}", "${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files", "${FILE_DIRNAME}" ], d)}"
-
-def oe_filter(f, str, d):
- from re import match
- return " ".join(filter(lambda x: match(f, x, 0), str.split()))
-
-def oe_filter_out(f, str, d):
- from re import match
- return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
-
-die() {
- oefatal "$*"
-}
-
-oenote() {
- echo "NOTE:" "$*"
-}
-
-oewarn() {
- echo "WARNING:" "$*"
-}
-
-oefatal() {
- echo "FATAL:" "$*"
- exit 1
-}
-
-oedebug() {
- test $# -ge 2 || {
- echo "Usage: oedebug level \"message\""
- exit 1
- }
-
- test ${OEDEBUG:-0} -ge $1 && {
- shift
- echo "DEBUG:" $*
- }
-}
-
-oe_runmake() {
- if [ x"$MAKE" = x ]; then MAKE=make; fi
- oenote ${MAKE} ${EXTRA_OEMAKE} "$@"
- ${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
-}
-
-oe_soinstall() {
- # Purpose: Install shared library file and
- # create the necessary links
- # Example:
- #
- # oe_
- #
- #oenote installing shared library $1 to $2
- #
- libname=`basename $1`
- install -m 755 $1 $2/$libname
- sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
- solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
- ln -sf $libname $2/$sonamelink
- ln -sf $libname $2/$solink
-}
-
-oe_libinstall() {
- # Purpose: Install a library, in all its forms
- # Example
- #
- # oe_libinstall libltdl ${STAGING_LIBDIR}/
- # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
- dir=""
- libtool=""
- silent=""
- require_static=""
- require_shared=""
- staging_install=""
- while [ "$#" -gt 0 ]; do
- case "$1" in
- -C)
- shift
- dir="$1"
- ;;
- -s)
- silent=1
- ;;
- -a)
- require_static=1
- ;;
- -so)
- require_shared=1
- ;;
- -*)
- oefatal "oe_libinstall: unknown option: $1"
- ;;
- *)
- break;
- ;;
- esac
- shift
- done
-
- libname="$1"
- shift
- destpath="$1"
- if [ -z "$destpath" ]; then
- oefatal "oe_libinstall: no destination path specified"
- fi
- if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
- then
- staging_install=1
- fi
-
- __runcmd () {
- if [ -z "$silent" ]; then
- echo >&2 "oe_libinstall: $*"
- fi
- $*
- }
-
- if [ -z "$dir" ]; then
- dir=`pwd`
- fi
-
- dotlai=$libname.lai
-
- # Sanity check that the libname.lai is unique
- number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
- if [ $number_of_files -gt 1 ]; then
- oefatal "oe_libinstall: $dotlai is not unique in $dir"
- fi
-
-
- dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
- olddir=`pwd`
- __runcmd cd $dir
-
- lafile=$libname.la
-
- # If such file doesn't exist, try to cut version suffix
- if [ ! -f "$lafile" ]; then
- libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
- lafile1=$libname.la
- if [ -f "$lafile1" ]; then
- libname=$libname1
- lafile=$lafile1
- fi
- fi
-
- if [ -f "$lafile" ]; then
- # libtool archive
- eval `cat $lafile|grep "^library_names="`
- libtool=1
- else
- library_names="$libname.so* $libname.dll.a"
- fi
-
- __runcmd install -d $destpath/
- dota=$libname.a
- if [ -f "$dota" -o -n "$require_static" ]; then
- rm -f $destpath/$dota
- __runcmd install -m 0644 $dota $destpath/
- fi
- if [ -f "$dotlai" -a -n "$libtool" ]; then
- if test -n "$staging_install"
- then
- # stop libtool using the final directory name for libraries
- # in staging:
- __runcmd rm -f $destpath/$libname.la
- __runcmd sed -e 's/^installed=yes$/installed=no/' \
- -e '/^dependency_libs=/s,${WORKDIR}[[:alnum:]/\._+-]*/\([[:alnum:]\._+-]*\),${STAGING_LIBDIR}/\1,g' \
- -e "/^dependency_libs=/s,\([[:space:]']\)${libdir},\1${STAGING_LIBDIR},g" \
- $dotlai >$destpath/$libname.la
- else
- rm -f $destpath/$libname.la
- __runcmd install -m 0644 $dotlai $destpath/$libname.la
- fi
- fi
-
- for name in $library_names; do
- files=`eval echo $name`
- for f in $files; do
- if [ ! -e "$f" ]; then
- if [ -n "$libtool" ]; then
- oefatal "oe_libinstall: $dir/$f not found."
- fi
- elif [ -L "$f" ]; then
- __runcmd cp -P "$f" $destpath/
- elif [ ! -L "$f" ]; then
- libfile="$f"
- rm -f $destpath/$libfile
- __runcmd install -m 0755 $libfile $destpath/
- fi
- done
- done
-
- if [ -z "$libfile" ]; then
- if [ -n "$require_shared" ]; then
- oefatal "oe_libinstall: unable to locate shared library"
- fi
- elif [ -z "$libtool" ]; then
- # special case hack for non-libtool .so.#.#.# links
- baselibfile=`basename "$libfile"`
- if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
- sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
- solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
- if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
- __runcmd ln -sf $baselibfile $destpath/$sonamelink
- fi
- __runcmd ln -sf $baselibfile $destpath/$solink
- fi
- fi
-
- __runcmd cd "$olddir"
-}
-
-def package_stagefile(file, d):
- import bb, os
-
- if bb.data.getVar('PSTAGING_ACTIVE', d, True) == "1":
- destfile = file.replace(bb.data.getVar("TMPDIR", d, 1), bb.data.getVar("PSTAGE_TMPDIR_STAGE", d, 1))
- bb.mkdirhier(os.path.dirname(destfile))
- #print "%s to %s" % (file, destfile)
- bb.copyfile(file, destfile)
-
-package_stagefile_shell() {
- if [ "$PSTAGING_ACTIVE" = "1" ]; then
- srcfile=$1
- destfile=`echo $srcfile | sed s#${TMPDIR}#${PSTAGE_TMPDIR_STAGE}#`
- destdir=`dirname $destfile`
- mkdir -p $destdir
- cp -dp $srcfile $destfile
- fi
-}
-
-oe_machinstall() {
- # Purpose: Install machine dependent files, if available
- # If not available, check if there is a default
- # If no default, just touch the destination
- # Example:
- # $1 $2 $3 $4
- # oe_machinstall -m 0644 fstab ${D}/etc/fstab
- #
- # TODO: Check argument number?
- #
- filename=`basename $3`
- dirname=`dirname $3`
-
- for o in `echo ${OVERRIDES} | tr ':' ' '`; do
- if [ -e $dirname/$o/$filename ]; then
- oenote $dirname/$o/$filename present, installing to $4
- install $1 $2 $dirname/$o/$filename $4
- return
- fi
- done
-# oenote overrides specific file NOT present, trying default=$3...
- if [ -e $3 ]; then
- oenote $3 present, installing to $4
- install $1 $2 $3 $4
- else
- oenote $3 NOT present, touching empty $4
- touch $4
- fi
-}
-
-addtask listtasks
-do_listtasks[nostamp] = "1"
-python do_listtasks() {
- import sys
- # emit variables and shell functions
- #bb.data.emit_env(sys.__stdout__, d)
- # emit the metadata which isnt valid shell
- for e in d.keys():
- if bb.data.getVarFlag(e, 'task', d):
- sys.__stdout__.write("%s\n" % e)
-}
-
-addtask clean
-do_clean[dirs] = "${TOPDIR}"
-do_clean[nostamp] = "1"
-python base_do_clean() {
- """clear the build and temp directories"""
- dir = bb.data.expand("${WORKDIR}", d)
- if dir == '//': raise bb.build.FuncFailed("wrong DATADIR")
- bb.note("removing " + dir)
- os.system('rm -rf ' + dir)
-
- dir = "%s.*" % bb.data.expand(bb.data.getVar('STAMP', d), d)
- bb.note("removing " + dir)
- os.system('rm -f '+ dir)
-}
-
-addtask rebuild after do_${BB_DEFAULT_TASK}
-addtask rebuild
-do_rebuild[dirs] = "${TOPDIR}"
-do_rebuild[nostamp] = "1"
-python base_do_rebuild() {
- """rebuild a package"""
-}
-
-#addtask mrproper
-#do_mrproper[dirs] = "${TOPDIR}"
-#do_mrproper[nostamp] = "1"
-#python base_do_mrproper() {
-# """clear downloaded sources, build and temp directories"""
-# dir = bb.data.expand("${DL_DIR}", d)
-# if dir == '/': bb.build.FuncFailed("wrong DATADIR")
-# bb.debug(2, "removing " + dir)
-# os.system('rm -rf ' + dir)
-# bb.build.exec_func('do_clean', d)
-#}
-
-SCENEFUNCS += "base_scenefunction"
-
-python base_do_setscene () {
- for f in (bb.data.getVar('SCENEFUNCS', d, 1) or '').split():
- bb.build.exec_func(f, d)
- if not os.path.exists(bb.data.getVar('STAMP', d, 1) + ".do_setscene"):
- bb.build.make_stamp("do_setscene", d)
-}
-do_setscene[selfstamp] = "1"
-addtask setscene before do_fetch
-
-python base_scenefunction () {
- stamp = bb.data.getVar('STAMP', d, 1) + ".needclean"
- if os.path.exists(stamp):
- bb.build.exec_func("do_clean", d)
-}
-
+# THISDIR only works properly with immediate expansion, as it has to run
+# in the context of the location it's used in (:=)
+THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
python base_do_fetch() {
- import sys
-
- localdata = bb.data.createCopy(d)
- bb.data.update_data(localdata)
-
- src_uri = bb.data.getVar('SRC_URI', localdata, 1)
- if not src_uri:
- return 1
-
- try:
- bb.fetch.init(src_uri.split(),d)
- except bb.fetch.NoMethodError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("No method: %s" % value)
-
- try:
- bb.fetch.go(localdata)
- except bb.fetch.MissingParameterError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Missing parameters: %s" % value)
- except bb.fetch.FetchError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Fetch failed: %s" % value)
- except bb.fetch.MD5SumError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("MD5 failed: %s" % value)
- except:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Unknown fetch Error: %s" % value)
-
- # Verify the SHA and MD5 sums we have in OE and check what do
- # in
- check_sum = bb.which(bb.data.getVar('BBPATH', d, True), "conf/checksums.ini")
- if not check_sum:
- bb.note("No conf/checksums.ini found, not checking checksums")
- return
-
- try:
- parser = base_chk_load_parser(check_sum)
- except:
- bb.note("Creating the CheckSum parser failed")
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
return
- pv = bb.data.getVar('PV', d, True)
- pn = bb.data.getVar('PN', d, True)
-
- # Check each URI
- for url in src_uri.split():
- localpath = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
- (type,host,path,_,_,_) = bb.decodeurl(url)
- uri = "%s://%s%s" % (type,host,path)
- try:
- if type == "http" or type == "https" or type == "ftp" or type == "ftps":
- if not base_chk_file(parser, pn, pv,uri, localpath, d):
- bb.note("%s-%s: %s has no entry in conf/checksums.ini, not checking URI" % (pn,pv,uri))
- except Exception:
- raise bb.build.FuncFailed("Checksum of '%s' failed" % uri)
-}
-
-addtask fetchall after do_fetch
-do_fetchall[recrdeptask] = "do_fetch"
-base_do_fetchall() {
- :
-}
-
-addtask checkuri
-do_checkuri[nostamp] = "1"
-python do_checkuri() {
- import sys
-
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
- src_uri = bb.data.getVar('SRC_URI', localdata, 1)
-
- try:
- bb.fetch.init(src_uri.split(),d)
- except bb.fetch.NoMethodError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("No method: %s" % value)
-
- try:
- bb.fetch.checkstatus(localdata)
- except bb.fetch.MissingParameterError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Missing parameters: %s" % value)
- except bb.fetch.FetchError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Fetch failed: %s" % value)
- except bb.fetch.MD5SumError:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("MD5 failed: %s" % value)
- except:
- (type, value, traceback) = sys.exc_info()
- raise bb.build.FuncFailed("Unknown fetch Error: %s" % value)
-}
-
-addtask checkuriall after do_checkuri
-do_checkuriall[recrdeptask] = "do_checkuri"
-do_checkuriall[nostamp] = "1"
-base_do_checkuriall() {
- :
-}
-
-addtask buildall after do_build
-do_buildall[recrdeptask] = "do_build"
-base_do_buildall() {
- :
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.download()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
-
-def oe_unpack_file(file, data, url = None):
- import bb, os
- if not url:
- url = "file://%s" % file
- dots = file.split(".")
- if dots[-1] in ['gz', 'bz2', 'Z']:
- efile = os.path.join(bb.data.getVar('WORKDIR', data, 1),os.path.basename('.'.join(dots[0:-1])))
- else:
- efile = file
- cmd = None
- if file.endswith('.tar'):
- cmd = 'tar x --no-same-owner -f %s' % file
- elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
- cmd = 'tar xz --no-same-owner -f %s' % file
- elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'):
- cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file
- elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'):
- cmd = 'gzip -dc %s > %s' % (file, efile)
- elif file.endswith('.bz2'):
- cmd = 'bzip2 -dc %s > %s' % (file, efile)
- elif file.endswith('.zip') or file.endswith('.jar'):
- cmd = 'unzip -q -o'
- (type, host, path, user, pswd, parm) = bb.decodeurl(url)
- if 'dos' in parm:
- cmd = '%s -a' % cmd
- cmd = "%s '%s'" % (cmd, file)
- elif os.path.isdir(file):
- filesdir = os.path.realpath(bb.data.getVar("FILESDIR", data, 1))
- destdir = "."
- if file[0:len(filesdir)] == filesdir:
- destdir = file[len(filesdir):file.rfind('/')]
- destdir = destdir.strip('/')
- if len(destdir) < 1:
- destdir = "."
- elif not os.access("%s/%s" % (os.getcwd(), destdir), os.F_OK):
- os.makedirs("%s/%s" % (os.getcwd(), destdir))
- cmd = 'cp -pPR %s %s/%s/' % (file, os.getcwd(), destdir)
- else:
- (type, host, path, user, pswd, parm) = bb.decodeurl(url)
- if not 'patch' in parm:
- # The "destdir" handling was specifically done for FILESPATH
- # items. So, only do so for file:// entries.
- if type == "file":
- destdir = bb.decodeurl(url)[1] or "."
- else:
- destdir = "."
- bb.mkdirhier("%s/%s" % (os.getcwd(), destdir))
- cmd = 'cp %s %s/%s/' % (file, os.getcwd(), destdir)
-
- if not cmd:
- return True
-
- dest = os.path.join(os.getcwd(), os.path.basename(file))
- if os.path.exists(dest):
- if os.path.samefile(file, dest):
- return True
-
- # Change to subdir before executing command
- save_cwd = os.getcwd();
- parm = bb.decodeurl(url)[5]
- if 'subdir' in parm:
- newdir = ("%s/%s" % (os.getcwd(), parm['subdir']))
- bb.mkdirhier(newdir)
- os.chdir(newdir)
-
- cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', data, 1), cmd)
- bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
- ret = os.system(cmd)
-
- os.chdir(save_cwd)
-
- return ret == 0
-
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
+do_unpack[cleandirs] = "${S}/patches"
python base_do_unpack() {
- import re, os
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
- src_uri = bb.data.getVar('SRC_URI', localdata, True)
- if not src_uri:
- return
+ rootdir = localdata.getVar('WORKDIR', True)
- for url in src_uri.split():
- try:
- local = bb.data.expand(bb.fetch.localpath(url, localdata), localdata)
- except bb.MalformedUrl, e:
- raise FuncFailed('Unable to generate local path for malformed uri: %s' % e)
- local = os.path.realpath(local)
- ret = oe_unpack_file(local, localdata, url)
- if not ret:
- raise bb.build.FuncFailed()
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.unpack(rootdir)
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
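
For reference, a minimal sketch of the bb.fetch2 flow that do_fetch and do_unpack now share, assuming a datastore d with SRC_URI and WORKDIR set (Python 2, as used by bitbake of this era):

    # Minimal sketch of the bb.fetch2 flow shared by do_fetch/do_unpack above.
    import bb.build
    import bb.data
    import bb.fetch2

    def fetch_and_unpack(d):
        # 'd' is assumed to be a BitBake datastore with SRC_URI and WORKDIR set.
        src_uri = (d.getVar('SRC_URI', True) or "").split()
        if not src_uri:
            return
        localdata = bb.data.createCopy(d)
        bb.data.update_data(localdata)
        try:
            fetcher = bb.fetch2.Fetch(src_uri, localdata)
            fetcher.download()                                 # what do_fetch does
            fetcher.unpack(localdata.getVar('WORKDIR', True))  # what do_unpack does
        except bb.fetch2.BBFetchException, e:
            raise bb.build.FuncFailed(e)
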
-def base_get_scmbasepath(d):
- import bb
- path_to_bbfiles = bb.data.getVar( 'BBFILES', d, 1 ).split()
- return path_to_bbfiles[0][:path_to_bbfiles[0].rindex( "packages" )]
-
-def base_get_metadata_monotone_branch(d):
- monotone_branch = "<unknown>"
- try:
- monotone_branch = file( "%s/_MTN/options" % base_get_scmbasepath(d) ).read().strip()
- if monotone_branch.startswith( "database" ):
- monotone_branch_words = monotone_branch.split()
- monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
- except:
- pass
- return monotone_branch
-
-def base_get_metadata_monotone_revision(d):
- monotone_revision = "<unknown>"
- try:
- monotone_revision = file( "%s/_MTN/revision" % base_get_scmbasepath(d) ).read().strip()
- if monotone_revision.startswith( "format_version" ):
- monotone_revision_words = monotone_revision.split()
- monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
- except IOError:
- pass
- return monotone_revision
-
-def base_get_metadata_svn_revision(d):
- revision = "<unknown>"
- try:
- revision = file( "%s/.svn/entries" % base_get_scmbasepath(d) ).readlines()[3].strip()
- except IOError:
- pass
- return revision
-
-def base_get_metadata_git_branch(d):
- import os
- branch = os.popen('cd %s; git branch | grep "^* " | tr -d "* "' % base_get_scmbasepath(d)).read()
-
- if len(branch) != 0:
- return branch
- return "<unknown>"
-
-def base_get_metadata_git_revision(d):
- import os
- rev = os.popen("cd %s; git log -n 1 --pretty=oneline --" % base_get_scmbasepath(d)).read().split(" ")[0]
- if len(rev) != 0:
- return rev
- return "<unknown>"
-
-def base_detect_revision(d):
- scms = [base_get_metadata_git_revision, \
- base_get_metadata_svn_revision]
-
- for scm in scms:
- rev = scm(d)
- if rev <> "<unknown>":
- return rev
-
- return "<unknown>"
-
-def base_detect_branch(d):
- scms = [base_get_metadata_git_branch]
-
- for scm in scms:
- rev = scm(d)
- if rev <> "<unknown>":
- return rev.strip()
-
- return "<unknown>"
-
-
-
-METADATA_BRANCH ?= "${@base_detect_branch(d)}"
-METADATA_REVISION ?= "${@base_detect_revision(d)}"
-
-GIT_CONFIG = "${STAGING_DIR_NATIVE}/usr/etc/gitconfig"
+GIT_CONFIG_PATH = "${STAGING_DIR_NATIVE}/etc"
+GIT_CONFIG = "${GIT_CONFIG_PATH}/gitconfig"
def generate_git_config(e):
- import bb
- import os
from bb import data
if data.getVar('GIT_CORE_CONFIG', e.data, True):
- gitconfig_path = bb.data.getVar('GIT_CONFIG', e.data, True)
- proxy_command = " gitproxy = %s\n" % data.getVar('GIT_PROXY_COMMAND', e.data, True)
+ gitconfig_path = e.data.getVar('GIT_CONFIG', True)
+ proxy_command = " gitProxy = %s\n" % data.getVar('OE_GIT_PROXY_COMMAND', e.data, True)
- bb.mkdirhier(bb.data.expand("${STAGING_DIR_NATIVE}/usr/etc/", e.data))
+ bb.mkdirhier(e.data.expand("${GIT_CONFIG_PATH}"))
if (os.path.exists(gitconfig_path)):
os.remove(gitconfig_path)
@@ -818,88 +131,147 @@ def generate_git_config(e):
f.write("[core]\n")
ignore_hosts = data.getVar('GIT_PROXY_IGNORE', e.data, True).split()
for ignore_host in ignore_hosts:
- f.write(" gitproxy = none for %s\n" % ignore_host)
+ f.write(" gitProxy = none for %s\n" % ignore_host)
f.write(proxy_command)
	f.close()
-addhandler base_eventhandler
-python base_eventhandler() {
- from bb import note, error, data
- from bb.event import Handled, NotHandled, getName
- import os
+def pkgarch_mapping(d):
+ # Compatibility mappings of TUNE_PKGARCH (opt in)
+ if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
+ if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
+ d.setVar("TUNE_PKGARCH", "armv7a")
- messages = {}
- messages["Completed"] = "completed"
- messages["Succeeded"] = "completed"
- messages["Started"] = "started"
- messages["Failed"] = "failed"
+def preferred_ml_updates(d):
+ # If any PREFERRED_PROVIDER or PREFERRED_VERSIONS are set,
+ # we need to mirror these variables in the multilib case
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ if not multilibs:
+ return
- name = getName(e)
- msg = ""
- if name.startswith("Pkg"):
- msg += "package %s: " % data.getVar("P", e.data, 1)
- msg += messages.get(name[3:]) or name[3:]
- elif name.startswith("Task"):
- msg += "package %s: task %s: " % (data.getVar("PF", e.data, 1), e.task)
- msg += messages.get(name[4:]) or name[4:]
- elif name.startswith("Build"):
- msg += "build %s: " % e.name
- msg += messages.get(name[5:]) or name[5:]
- elif name == "UnsatisfiedDep":
- msg += "package %s: dependency %s %s" % (e.pkg, e.dep, name[:-3].lower())
+ prefixes = []
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ prefixes.append(eext[1])
- # Only need to output when using 1.8 or lower, the UI code handles it
- # otherwise
- if (int(bb.__version__.split(".")[0]) <= 1 and int(bb.__version__.split(".")[1]) <= 8):
- if msg:
- note(msg)
+ versions = []
+ providers = []
+ for v in d.keys():
+ if v.startswith("PREFERRED_VERSION_"):
+ versions.append(v)
+ if v.startswith("PREFERRED_PROVIDER_"):
+ providers.append(v)
- if name.startswith("BuildStarted"):
- bb.data.setVar( 'BB_VERSION', bb.__version__, e.data )
- statusvars = ['BB_VERSION', 'METADATA_BRANCH', 'METADATA_REVISION', 'TARGET_ARCH', 'TARGET_OS', 'MACHINE', 'DISTRO', 'DISTRO_VERSION','TARGET_FPU']
- statuslines = ["%-17s = \"%s\"" % (i, bb.data.getVar(i, e.data, 1) or '') for i in statusvars]
- statusmsg = "\nOE Build Configuration:\n%s\n" % '\n'.join(statuslines)
- print statusmsg
+ for v in versions:
+ val = d.getVar(v, False)
+ pkg = v.replace("PREFERRED_VERSION_", "")
+ if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
+ continue
+ for p in prefixes:
+ newname = "PREFERRED_VERSION_" + p + "-" + pkg
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
- needed_vars = [ "TARGET_ARCH", "TARGET_OS" ]
- pesteruser = []
- for v in needed_vars:
- val = bb.data.getVar(v, e.data, 1)
- if not val or val == 'INVALID':
- pesteruser.append(v)
- if pesteruser:
- bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
+ for prov in providers:
+ val = d.getVar(prov, False)
+ pkg = prov.replace("PREFERRED_PROVIDER_", "")
+ if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
+ continue
+ virt = ""
+ if pkg.startswith("virtual/"):
+ pkg = pkg.replace("virtual/", "")
+ virt = "virtual/"
+ for p in prefixes:
+ newname = "PREFERRED_PROVIDER_" + virt + p + "-" + pkg
+ if pkg != "kernel":
+ val = p + "-" + val
+ if not d.getVar(newname, False):
+ d.setVar(newname, val)
- #
- # Handle removing stamps for 'rebuild' task
- #
- if name.startswith("StampUpdate"):
- for (fn, task) in e.targets:
- #print "%s %s" % (task, fn)
- if task == "do_rebuild":
- dir = "%s.*" % e.stampPrefix[fn]
- bb.note("Removing stamps: " + dir)
- os.system('rm -f '+ dir)
- os.system('touch ' + e.stampPrefix[fn] + '.needclean')
- if name == "ConfigParsed":
- generate_git_config(e)
+ mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+ extramp = []
+ for p in mp:
+ if p.endswith("-native") or p.endswith("-nativesdk"):
+ continue
+ virt = ""
+ if p.startswith("virtual/"):
+ p = p.replace("virtual/", "")
+ virt = "virtual/"
+ for pref in prefixes:
+ extramp.append(virt + pref + "-" + p)
+ d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
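
To make the mirroring concrete, here is a standalone sketch of the renaming rules (illustrative only; it works on a plain dict where the class works on the datastore):

    # Sketch of the variable mirroring done by preferred_ml_updates().
    def mirror_preferences(prefs, prefixes):
        # prefs: {variable name: value}; prefixes: e.g. ["lib32"] from MULTILIBS.
        out = dict(prefs)
        for name, val in prefs.items():
            if name.startswith("PREFERRED_VERSION_"):
                pkg = name[len("PREFERRED_VERSION_"):]
                if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
                    continue
                for p in prefixes:
                    out.setdefault("PREFERRED_VERSION_%s-%s" % (p, pkg), val)
            elif name.startswith("PREFERRED_PROVIDER_"):
                pkg = name[len("PREFERRED_PROVIDER_"):]
                if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
                    continue
                virt = ""
                if pkg.startswith("virtual/"):
                    pkg = pkg[len("virtual/"):]
                    virt = "virtual/"
                for p in prefixes:
                    # The kernel provider is shared, so its value is not prefixed.
                    newval = val if pkg == "kernel" else "%s-%s" % (p, val)
                    out.setdefault("PREFERRED_PROVIDER_%s%s-%s" % (virt, p, pkg), newval)
        return out

    # mirror_preferences({"PREFERRED_PROVIDER_virtual/libc": "eglibc"}, ["lib32"])
    # also yields PREFERRED_PROVIDER_virtual/lib32-libc = "lib32-eglibc".
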
- if not data in e.__dict__:
- return NotHandled
- log = data.getVar("EVENTLOG", e.data, 1)
- if log:
- logfile = file(log, "a")
- logfile.write("%s\n" % msg)
- logfile.close()
+def get_layers_branch_rev(d):
+ layers = (d.getVar("BBLAYERS", True) or "").split()
+ layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
+ base_get_metadata_git_branch(i, None).strip(), \
+ base_get_metadata_git_revision(i, None)) \
+ for i in layers]
+ i = len(layers_branch_rev)-1
+ p1 = layers_branch_rev[i].find("=")
+ s1 = layers_branch_rev[i][p1:]
+ while i > 0:
+ p2 = layers_branch_rev[i-1].find("=")
+ s2= layers_branch_rev[i-1][p2:]
+ if s1 == s2:
+ layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
+ i -= 1
+ else:
+ i -= 1
+ p1 = layers_branch_rev[i].find("=")
+ s1= layers_branch_rev[i][p1:]
+ return layers_branch_rev
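
The while loop at the end is easier to see with data; a minimal sketch of the same squashing (sample layer names and revisions are hypothetical):

    # The suffix squashing in get_layers_branch_rev(), run on sample data:
    # a line loses its '= "branch:rev"' part when the following line repeats it.
    lines = ['meta              = "master:abc123"',
             'meta-yocto        = "master:abc123"',
             'meta-custom       = "work:def456"']
    i = len(lines) - 1
    s1 = lines[i][lines[i].find("="):]
    while i > 0:
        s2 = lines[i-1][lines[i-1].find("="):]
        if s1 == s2:
            lines[i-1] = lines[i-1][:lines[i-1].find("=")]
        else:
            s1 = s2
        i -= 1
    # lines[0] is now 'meta              ' while lines[1] keeps the suffix.
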
- return NotHandled
+
+BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
+BUILDCFG_FUNCS[type] = "list"
+
+def buildcfg_vars(d):
+ statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
+ for var in statusvars:
+ value = d.getVar(var, True)
+ if value is not None:
+ yield '%-17s = "%s"' % (var, value)
+
+def buildcfg_neededvars(d):
+ needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
+ pesteruser = []
+ for v in needed_vars:
+ val = d.getVar(v, True)
+ if not val or val == 'INVALID':
+ pesteruser.append(v)
+
+ if pesteruser:
+ bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
+
+addhandler base_eventhandler
+python base_eventhandler() {
+ if isinstance(e, bb.event.ConfigParsed):
+ e.data.setVar('BB_VERSION', bb.__version__)
+ generate_git_config(e)
+ pkgarch_mapping(e.data)
+ preferred_ml_updates(e.data)
+
+ if isinstance(e, bb.event.BuildStarted):
+ statuslines = []
+ for func in oe.data.typed_value('BUILDCFG_FUNCS', e.data):
+ g = globals()
+ if func not in g:
+ bb.warn("Build configuration function '%s' does not exist" % func)
+ else:
+ flines = g[func](e.data)
+ if flines:
+ statuslines.extend(flines)
+
+ statusheader = e.data.getVar('BUILDCFG_HEADER', True)
+ bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
}
-addtask configure after do_unpack do_patch
-do_configure[dirs] = "${S} ${B}"
-do_configure[deptask] = "do_populate_staging"
+addtask configure after do_patch
+do_configure[dirs] = "${CCACHE_DIR} ${S} ${B}"
+do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
:
}
@@ -910,28 +282,10 @@ base_do_compile() {
if [ -e Makefile -o -e makefile ]; then
oe_runmake || die "make failed"
else
- oenote "nothing to compile"
+ bbnote "nothing to compile"
fi
}
-base_do_stage () {
- :
-}
-
-do_populate_staging[dirs] = "${STAGING_DIR_TARGET}/${layout_bindir} ${STAGING_DIR_TARGET}/${layout_libdir} \
- ${STAGING_DIR_TARGET}/${layout_includedir} \
- ${STAGING_BINDIR_NATIVE} ${STAGING_LIBDIR_NATIVE} \
- ${STAGING_INCDIR_NATIVE} \
- ${STAGING_DATADIR} \
- ${S} ${B}"
-
-# Could be compile but populate_staging and do_install shouldn't run at the same time
-addtask populate_staging after do_install
-
-python do_populate_staging () {
- bb.build.exec_func('do_stage', d)
-}
-
addtask install after do_compile
do_install[dirs] = "${D} ${S} ${B}"
# Remove and re-create ${D} so that is it guaranteed to be empty
@@ -945,81 +299,203 @@ base_do_package() {
:
}
-addtask build after do_populate_staging
+addtask build after do_populate_sysroot
do_build = ""
do_build[func] = "1"
+do_build[noexec] = "1"
+do_build[recrdeptask] += "do_deploy"
+do_build () {
+ :
+}
+
+python () {
+ import exceptions, string, re
+
+ # Handle PACKAGECONFIG
+ #
+ # These take the form:
+ #
+    # PACKAGECONFIG ??= "<default options>"
+ # PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
+ pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
+ if pkgconfigflags:
+ pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
+ pn = d.getVar("PN", True)
+ mlprefix = d.getVar("MLPREFIX", True)
-# Make sure MACHINE isn't exported
-# (breaks binutils at least)
-MACHINE[unexport] = "1"
+ def expandFilter(appends, extension, prefix):
+ appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
+ newappends = []
+ for a in appends:
+ if a.endswith("-native") or a.endswith("-cross"):
+ newappends.append(a)
+ elif a.startswith("virtual/"):
+ subs = a.split("/", 1)[1]
+ newappends.append("virtual/" + prefix + subs + extension)
+ else:
+ newappends.append(prefix + a + extension)
+ return newappends
-# Make sure TARGET_ARCH isn't exported
-# (breaks Makefiles using implicit rules, e.g. quilt, as GNU make has this
-# in them, undocumented)
-TARGET_ARCH[unexport] = "1"
+ def appendVar(varname, appends):
+ if not appends:
+ return
+ if varname.find("DEPENDS") != -1:
+ if pn.endswith("-nativesdk"):
+ appends = expandFilter(appends, "-nativesdk", "")
+ if pn.endswith("-native"):
+ appends = expandFilter(appends, "-native", "")
+ if mlprefix:
+ appends = expandFilter(appends, "", mlprefix)
+ varname = d.expand(varname)
+ d.appendVar(varname, " " + " ".join(appends))
-# Make sure DISTRO isn't exported
-# (breaks sysvinit at least)
-DISTRO[unexport] = "1"
+ extradeps = []
+ extrardeps = []
+ extraconf = []
+ for flag, flagval in pkgconfigflags.items():
+ if flag == "defaultval":
+ continue
+ items = flagval.split(",")
+        if len(items) == 3:
+            enable, disable, depend = items
+            rdepend = ""
+        elif len(items) == 4:
+            enable, disable, depend, rdepend = items
+        else:
+            bb.error("%s: PACKAGECONFIG[%s] must have 3 or 4 comma-separated values" % (pn, flag))
+            continue
+ if flag in pkgconfig:
+ if depend:
+ extradeps.append(depend)
+ if rdepend:
+ extrardeps.append(rdepend)
+ if enable:
+ extraconf.append(enable)
+ elif disable:
+ extraconf.append(disable)
+ appendVar('DEPENDS', extradeps)
+ appendVar('RDEPENDS_${PN}', extrardeps)
+ appendVar('EXTRA_OECONF', extraconf)
+    # If PRINC is set, try to increase the PR value by the amount specified
+ princ = d.getVar('PRINC', True)
+ if princ and princ != "0":
+ pr = d.getVar('PR', True)
+        pr_prefix = re.search(r"\D+", pr)
+        prval = re.search(r"\d+", pr)
+ if pr_prefix is None or prval is None:
+ bb.error("Unable to analyse format of PR variable: %s" % pr)
+ nval = int(prval.group(0)) + int(princ)
+ pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
+ d.setVar('PR', pr)
-def base_after_parse(d):
- import bb, os, exceptions
+ pn = d.getVar('PN', True)
+ license = d.getVar('LICENSE', True)
+ if license == "INVALID":
+ bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
- source_mirror_fetch = bb.data.getVar('SOURCE_MIRROR_FETCH', d, 0)
+ unmatched_license_flag = check_license_flags(d)
+ if unmatched_license_flag:
+ bb.debug(1, "Skipping %s because it has a restricted license not"
+ " whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
+ raise bb.parse.SkipPackage("because it has a restricted license not"
+ " whitelisted in LICENSE_FLAGS_WHITELIST")
+
+ # If we're building a target package we need to use fakeroot (pseudo)
+ # in order to capture permissions, owners, groups and special files
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
+ d.setVarFlag('do_configure', 'umask', 022)
+ d.setVarFlag('do_compile', 'umask', 022)
+ d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_install', 'fakeroot', 1)
+ d.setVarFlag('do_install', 'umask', 022)
+ d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_package', 'fakeroot', 1)
+ d.setVarFlag('do_package', 'umask', 022)
+ d.setVarFlag('do_package_setscene', 'fakeroot', 1)
+ source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
if not source_mirror_fetch:
- need_host = bb.data.getVar('COMPATIBLE_HOST', d, 1)
+ need_host = d.getVar('COMPATIBLE_HOST', True)
if need_host:
import re
- this_host = bb.data.getVar('HOST_SYS', d, 1)
+ this_host = d.getVar('HOST_SYS', True)
if not re.match(need_host, this_host):
- raise bb.parse.SkipPackage("incompatible with host %s" % this_host)
+ raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
- need_machine = bb.data.getVar('COMPATIBLE_MACHINE', d, 1)
+ need_machine = d.getVar('COMPATIBLE_MACHINE', True)
if need_machine:
import re
- this_machine = bb.data.getVar('MACHINE', d, 1)
+ this_machine = d.getVar('MACHINE', True)
if this_machine and not re.match(need_machine, this_machine):
- raise bb.parse.SkipPackage("incompatible with machine %s" % this_machine)
+ this_soc_family = d.getVar('SOC_FAMILY', True)
+ if (this_soc_family and not re.match(need_machine, this_soc_family)) or not this_soc_family:
+ raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % this_machine)
+
+
+ dont_want_license = d.getVar('INCOMPATIBLE_LICENSE', True)
+
+ if dont_want_license and not pn.endswith("-native") and not pn.endswith("-cross") and not pn.endswith("-cross-initial") and not pn.endswith("-cross-intermediate") and not pn.endswith("-crosssdk-intermediate") and not pn.endswith("-crosssdk") and not pn.endswith("-crosssdk-initial") and not pn.endswith("-nativesdk"):
+ # Internally, we'll use the license mapping. This way INCOMPATIBLE_LICENSE = "GPLv2" and
+ # INCOMPATIBLE_LICENSE = "GPLv2.0" will pick up all variations of GPL-2.0
+ spdx_license = return_spdx(d, dont_want_license)
+ hosttools_whitelist = (d.getVar('HOSTTOOLS_WHITELIST_%s' % dont_want_license, True) or d.getVar('HOSTTOOLS_WHITELIST_%s' % spdx_license, True) or "").split()
+        lgplv2_whitelist = (d.getVar('LGPLv2_WHITELIST_%s' % dont_want_license, True) or d.getVar('LGPLv2_WHITELIST_%s' % spdx_license, True) or "").split()
+        dont_want_whitelist = (d.getVar('WHITELIST_%s' % dont_want_license, True) or d.getVar('WHITELIST_%s' % spdx_license, True) or "").split()
+ if pn not in hosttools_whitelist and pn not in lgplv2_whitelist and pn not in dont_want_whitelist:
+ this_license = d.getVar('LICENSE', True)
+ # At this point we know the recipe contains an INCOMPATIBLE_LICENSE, however it may contain packages that do not.
+ packages = d.getVar('PACKAGES', True).split()
+ dont_skip_recipe = False
+ skipped_packages = {}
+ unskipped_packages = []
+ for pkg in packages:
+ if incompatible_license(d, dont_want_license, pkg):
+ skipped_packages[pkg] = this_license
+ dont_skip_recipe = True
+ else:
+ unskipped_packages.append(pkg)
+ if not unskipped_packages:
+ # if we hit here and have excluded all packages, then we can just exclude the recipe
+ dont_skip_recipe = False
+ elif skipped_packages and unskipped_packages:
+ for pkg, license in skipped_packages.iteritems():
+ bb.note("SKIPPING the package " + pkg + " at do_rootfs because it's " + this_license)
+ d.setVar('LICENSE_EXCLUSION-' + pkg, 1)
+                for pkg in unskipped_packages:
+                    bb.note("INCLUDING the package " + pkg)
- pn = bb.data.getVar('PN', d, 1)
+ if dont_skip_recipe is False and incompatible_license(d, dont_want_license):
+ bb.note("SKIPPING recipe %s because it's %s" % (pn, this_license))
+ raise bb.parse.SkipPackage("incompatible with license %s" % this_license)
- # OBSOLETE in bitbake 1.7.4
- srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
- if srcdate != None:
- bb.data.setVar('SRCDATE', srcdate, d)
- use_nls = bb.data.getVar('USE_NLS_%s' % pn, d, 1)
- if use_nls != None:
- bb.data.setVar('USE_NLS', use_nls, d)
+
+ srcuri = d.getVar('SRC_URI', True)
+ # Svn packages should DEPEND on subversion-native
+ if "svn://" in srcuri:
+ d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
# Git packages should DEPEND on git-native
- srcuri = bb.data.getVar('SRC_URI', d, 1)
if "git://" in srcuri:
- depends = bb.data.getVarFlag('do_fetch', 'depends', d) or ""
- depends = depends + " git-native:do_populate_staging"
- bb.data.setVarFlag('do_fetch', 'depends', depends, d)
+ d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
+
+ # Mercurial packages should DEPEND on mercurial-native
+ elif "hg://" in srcuri:
+ d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
# OSC packages should DEPEND on osc-native
- srcuri = bb.data.getVar('SRC_URI', d, 1)
- if "osc://" in srcuri:
- depends = bb.data.getVarFlag('do_fetch', 'depends', d) or ""
- depends = depends + " osc-native:do_populate_staging"
- bb.data.setVarFlag('do_fetch', 'depends', depends, d)
+ elif "osc://" in srcuri:
+ d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
- # bb.utils.sha256_file() will fail if hashlib isn't present, so we fallback
- # on shasum-native. We need to ensure that it is staged before we fetch.
- if bb.data.getVar('PN', d, True) != "shasum-native":
- try:
- import hashlib
- except ImportError:
- depends = bb.data.getVarFlag('do_fetch', 'depends', d) or ""
- depends = depends + " shasum-native:do_populate_staging"
- bb.data.setVarFlag('do_fetch', 'depends', depends, d)
+    # *.xz should depend on xz-native for unpacking
+ # Not endswith because of "*.patch.xz;patch=1". Need bb.decodeurl in future
+ if '.xz' in srcuri:
+ d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+
+ # unzip-native should already be staged before unpacking ZIP recipes
+ if ".zip" in srcuri:
+ d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
# 'multimachine' handling
- mach_arch = bb.data.getVar('MACHINE_ARCH', d, 1)
- pkg_arch = bb.data.getVar('PACKAGE_ARCH', d, 1)
+ mach_arch = d.getVar('MACHINE_ARCH', True)
+ pkg_arch = d.getVar('PACKAGE_ARCH', True)
if (pkg_arch == mach_arch):
# Already machine specific - nothing further to do
@@ -1029,125 +505,60 @@ def base_after_parse(d):
# We always try to scan SRC_URI for urls with machine overrides
# unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
#
- override = bb.data.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', d, 1)
+ override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', True)
if override != '0':
paths = []
- for p in [ "${PF}", "${P}", "${PN}", "files", "" ]:
- path = bb.data.expand(os.path.join("${FILE_DIRNAME}", p, "${MACHINE}"), d)
- if os.path.isdir(path):
- paths.append(path)
+ fpaths = (d.getVar('FILESPATH', True) or '').split(':')
+ machine = d.getVar('MACHINE', True)
+ for p in fpaths:
+ if os.path.basename(p) == machine and os.path.isdir(p):
+ paths.append(p)
+
if len(paths) != 0:
for s in srcuri.split():
if not s.startswith("file://"):
continue
- local = bb.data.expand(bb.fetch.localpath(s, d), d)
+ fetcher = bb.fetch2.Fetch([s], d)
+ local = fetcher.localpath(s)
for mp in paths:
if local.startswith(mp):
- #bb.note("overriding PACKAGE_ARCH from %s to %s" % (pkg_arch, mach_arch))
- bb.data.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}", d)
- bb.data.setVar('MULTIMACH_ARCH', mach_arch, d)
+ #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
+ d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
return
- multiarch = pkg_arch
-
- packages = bb.data.getVar('PACKAGES', d, 1).split()
+ packages = d.getVar('PACKAGES', True).split()
for pkg in packages:
- pkgarch = bb.data.getVar("PACKAGE_ARCH_%s" % pkg, d, 1)
+ pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, True)
- # We could look for != PACKAGE_ARCH here but how to choose
+ # We could look for != PACKAGE_ARCH here but how to choose
# if multiple differences are present?
# Look through PACKAGE_ARCHS for the priority order?
if pkgarch and pkgarch == mach_arch:
- multiarch = mach_arch
- break
-
- bb.data.setVar('MULTIMACH_ARCH', multiarch, d)
-
-python () {
- base_after_parse(d)
+ d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
+ bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
}
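
A standalone sketch of the PACKAGECONFIG expansion handled above (names are illustrative; the real code additionally rewrites the dependencies for native, nativesdk and multilib variants via expandFilter):

    # Sketch: how a selected PACKAGECONFIG flag contributes its enable option
    # and its build/runtime depends, while unselected flags contribute their
    # disable option.
    def expand_packageconfig(selected, flags):
        extradeps, extrardeps, extraconf = [], [], []
        for flag, flagval in flags.items():
            # Pad so both the 3- and 4-field forms unpack cleanly.
            enable, disable, depend, rdepend = (flagval.split(",") + [""] * 4)[:4]
            if flag in selected:
                if depend:
                    extradeps.append(depend)
                if rdepend:
                    extrardeps.append(rdepend)
                if enable:
                    extraconf.append(enable)
            elif disable:
                extraconf.append(disable)
        return extradeps, extrardeps, extraconf

    flags = {"foo": "--enable-foo,--disable-foo,libfoo,foo-runtime",
             "bar": "--enable-bar,--disable-bar,libbar"}
    # expand_packageconfig(["foo"], flags) yields libfoo for DEPENDS,
    # foo-runtime for RDEPENDS_${PN}, and --enable-foo plus --disable-bar
    # for EXTRA_OECONF (result ordering follows dict iteration).
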
-def check_app_exists(app, d):
- from bb import which, data
-
- app = data.expand(app, d)
- path = data.getVar('PATH', d, 1)
- return len(which(path, app)) != 0
-
-def check_gcc3(data):
-
- gcc3_versions = 'gcc-3.4.6 gcc-3.4.7 gcc-3.4 gcc34 gcc-3.4.4 gcc-3.3 gcc33 gcc-3.3.6 gcc-3.2 gcc32'
-
- for gcc3 in gcc3_versions.split():
- if check_app_exists(gcc3, data):
- return gcc3
-
- return False
-
-# Patch handling
-inherit patch
-
-# Configuration data from site files
-# Move to autotools.bbclass?
-inherit siteinfo
+addtask cleansstate after do_clean
+python do_cleansstate() {
+ sstate_clean_cachefiles(d)
+}
-EXPORT_FUNCTIONS do_setscene do_clean do_fetch do_unpack do_configure do_compile do_install do_package do_populate_pkgs do_stage do_rebuild do_fetchall
+addtask cleanall after do_cleansstate
+python do_cleanall() {
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
-MIRRORS[func] = "0"
-MIRRORS () {
-${DEBIAN_MIRROR}/main http://snapshot.debian.net/archive/pool
-${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool
-${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool
-${GNU_MIRROR} ftp://mirrors.kernel.org/gnu
-${GNU_MIRROR} ftp://ftp.matrix.com.br/pub/gnu
-${GNU_MIRROR} ftp://ftp.cs.ubc.ca/mirror2/gnu
-${GNU_MIRROR} ftp://sunsite.ust.hk/pub/gnu
-${GNU_MIRROR} ftp://ftp.ayamura.org/pub/gnu
-${KERNELORG_MIRROR} http://www.kernel.org/pub
-${KERNELORG_MIRROR} ftp://ftp.us.kernel.org/pub
-${KERNELORG_MIRROR} ftp://ftp.uk.kernel.org/pub
-${KERNELORG_MIRROR} ftp://ftp.hk.kernel.org/pub
-${KERNELORG_MIRROR} ftp://ftp.au.kernel.org/pub
-${KERNELORG_MIRROR} ftp://ftp.jp.kernel.org/pub
-ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/
-ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/
-ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/
-ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN
-ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/
-ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/
-ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnutls.org/pub/gnutls/
-ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/
-ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/
-ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/
-ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/
-http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/
-http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/
-ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/
-http://www.apache.org/dist http://archive.apache.org/dist
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.clean()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
}
+do_cleanall[nostamp] = "1"
+
+EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package
diff --git a/meta/classes/base_srpm.bbclass b/meta/classes/base_srpm.bbclass
deleted file mode 100644
index aea633527..000000000
--- a/meta/classes/base_srpm.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-inherit base package rpm_core
-
-SPECFILE="${RPMBUILDPATH}/SPECS/${PN}.spec"
-
-base_srpm_do_unpack() {
- test -e ${SRPMFILE} || die "Source rpm \"${SRPMFILE}\"does not exist"
- if ! test -e ${SPECFILE}; then
- ${RPM} -i ${SRPMFILE}
- fi
- test -e ${SPECFILE} || die "Spec file \"${SPECFILE}\" does not exist"
- ${RPMBUILD} -bp ${SPECFILE}
-}
-
-base_srpm_do_compile() {
- ${RPMBUILD} -bc ${SPECFILE}
-}
-
-base_srpm_do_install() {
- ${RPMBUILD} -bi ${SPECFILE}
-}
diff --git a/meta/classes/binconfig.bbclass b/meta/classes/binconfig.bbclass
index 4e425a76d..3deb5415e 100644
--- a/meta/classes/binconfig.bbclass
+++ b/meta/classes/binconfig.bbclass
@@ -1,57 +1,54 @@
-inherit base
-
# The namespaces can clash here hence the two step replace
def get_binconfig_mangle(d):
- import bb.data
s = "-e ''"
if not bb.data.inherits_class('native', d):
optional_quote = r"\(\"\?\)"
s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
- s += " -e 's:=%s${prefix}:=\\1OEPREFIX:'" % optional_quote
- s += " -e 's:=%s${exec_prefix}:=\\1OEEXECPREFIX:'" % optional_quote
+ s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
+ s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
- s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${layout_prefix}:'"
- s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${layout_exec_prefix}:'"
+ s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
+ s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
return s
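
In miniature, the generated sed expression rewrites in two steps: target paths become placeholder tokens, then tokens become staging paths, so overlapping path prefixes cannot be mangled twice. A Python rendering with hypothetical paths:

    import re

    # Hypothetical stand-ins for ${libdir} and ${STAGING_LIBDIR}.
    libdir, staging_libdir = "/usr/lib", "/sysroot/usr/lib"

    line = "Libs: -L/usr/lib -lfoo"
    line = re.sub(re.escape("-L" + libdir), "-LOELIBDIR", line)  # step 1: to token
    line = line.replace("-LOELIBDIR", "-L" + staging_libdir)     # step 2: to staging
    assert line == "Libs: -L/sysroot/usr/lib -lfoo"
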
BINCONFIG_GLOB ?= "*-config"
-do_install_append() {
-
- #the 'if' protects native packages, since we can't easily check for bb.data.inherits_class('native', d) in shell
- if [ -e ${D}${bindir} ] ; then
- for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
- cat $config | sed \
- -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
- -e 's:${STAGING_INCDIR}:${includedir}:g;' \
- -e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${layout_prefix}:${prefix}:' > ${D}${bindir}/`basename $config`
- done
- fi
+PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
- for lafile in `find ${D} -name *.la` ; do
+binconfig_package_preprocess () {
+ for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
+ sed -i \
+ -e 's:${STAGING_LIBDIR}:${libdir}:g;' \
+ -e 's:${STAGING_INCDIR}:${includedir}:g;' \
+ -e 's:${STAGING_DATADIR}:${datadir}:' \
+ -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
+ $config
+ done
+ for lafile in `find ${PKGD} -name "*.la"` ; do
sed -i \
-e 's:${STAGING_LIBDIR}:${libdir}:g;' \
-e 's:${STAGING_INCDIR}:${includedir}:g;' \
-e 's:${STAGING_DATADIR}:${datadir}:' \
- -e 's:${STAGING_DIR_HOST}${layout_prefix}:${prefix}:' \
+ -e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$lafile
done
}
-do_stage_append() {
+SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
+
+binconfig_sysroot_preprocess () {
for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
configname=`basename $config`
- install -d ${STAGING_BINDIR_CROSS}
- cat $config | sed ${@get_binconfig_mangle(d)} > ${STAGING_BINDIR_CROSS}/$configname
- chmod u+x ${STAGING_BINDIR_CROSS}/$configname
+ install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
+ cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
+ chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
done
}
diff --git a/meta/classes/blacklist.bbclass b/meta/classes/blacklist.bbclass
new file mode 100644
index 000000000..75abd99c2
--- /dev/null
+++ b/meta/classes/blacklist.bbclass
@@ -0,0 +1,20 @@
+# anonymous support class, originally from Angstrom
+#
+# To use the blacklist, a distribution should include this
+# class in the INHERIT_DISTRO
+#
+# No longer use ANGSTROM_BLACKLIST, instead use a table of
+# recipes in PNBLACKLIST
+#
+# Features:
+#
+# * To add a package to the blacklist, set:
+# PNBLACKLIST[pn] = "message"
+#
+
+python () {
+ blacklist = d.getVarFlag('PNBLACKLIST', d.getVar('PN', True), True)
+
+ if blacklist:
+ raise bb.parse.SkipPackage("Recipe is blacklisted: %s" % (blacklist))
+}
diff --git a/meta/classes/boot-directdisk.bbclass b/meta/classes/boot-directdisk.bbclass
new file mode 100644
index 000000000..7d8f8ff78
--- /dev/null
+++ b/meta/classes/boot-directdisk.bbclass
@@ -0,0 +1,100 @@
+# boot-directdisk.bbclass
+# (loosely based on bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
+#
+# Create an image which can be written directly onto a hard disk using dd and
+# then booted.
+#
+# This uses syslinux. extlinux would have been nice but requires the ext2/3
+# partition to be mounted. grub needs to run itself as part of the install
+# process.
+#
+# The end result is a 512-byte boot sector populated with an MBR and partition
+# table, followed by an msdos FAT16 partition containing syslinux and a linux
+# kernel, completed by the ext2/3 rootfs.
+#
+# We have to push the msdos partition table size above 16MB so that FAT16 is
+# used, as parted won't touch FAT12 partitions.
+
+# External variables needed
+
+# ${ROOTFS} - the rootfs image to incorporate
+
+do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
+ syslinux:do_populate_sysroot \
+ syslinux-native:do_populate_sysroot \
+ parted-native:do_populate_sysroot \
+ mtools-native:do_populate_sysroot "
+
+PACKAGES = " "
+EXCLUDE_FROM_WORLD = "1"
+
+HDDDIR = "${S}/hdd/boot"
+HDDIMG = "${S}/hdd.image"
+
+BOOTDD_VOLUME_ID ?= "boot"
+BOOTDD_EXTRA_SPACE ?= "16384"
+
+# Get the build_syslinux_cfg() function from the syslinux class
+
+AUTO_SYSLINUXCFG = "1"
+SYSLINUX_ROOT ?= "root=/dev/sda2"
+SYSLINUX_TIMEOUT ?= "10"
+
+inherit syslinux
+
+build_boot_dd() {
+ IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
+
+ install -d ${HDDDIR}
+ install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage ${HDDDIR}/vmlinuz
+ install -m 0644 ${S}/syslinux.cfg ${HDDDIR}/syslinux.cfg
+ install -m 444 ${STAGING_LIBDIR}/syslinux/ldlinux.sys ${HDDDIR}/ldlinux.sys
+
+ BLOCKS=`du -bks ${HDDDIR} | cut -f 1`
+ BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
+
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
+
+ mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C ${HDDIMG} $BLOCKS
+ mcopy -i ${HDDIMG} -s ${HDDDIR}/* ::/
+
+ syslinux ${HDDIMG}
+ chmod 644 ${HDDIMG}
+
+ ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
+ TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
+ END1=`expr $BLOCKS \* 1024`
+ END2=`expr $END1 + 512`
+ END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
+
+ echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
+ rm -rf $IMAGE
+ dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
+
+ parted $IMAGE mklabel msdos
+ parted $IMAGE mkpart primary fat16 0 ${END1}B
+ parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
+ parted $IMAGE set 1 boot on
+ parted $IMAGE print
+
+ OFFSET=`expr $END2 / 512`
+ dd if=${STAGING_LIBDIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
+ dd if=${HDDIMG} of=$IMAGE conv=notrunc seek=1 bs=512
+ dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
+
+ cd ${DEPLOY_DIR_IMAGE}
+ rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+ ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+}
+
+python do_bootdirectdisk() {
+ bb.build.exec_func('build_syslinux_cfg', d)
+ bb.build.exec_func('build_boot_dd', d)
+}
+
+addtask bootdirectdisk before do_build
+do_bootdirectdisk[nostamp] = "1"
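
The partition arithmetic in build_boot_dd() restated in one place, with hypothetical sizes (all divisions are integer, as in the shell):

    # Offsets computed by build_boot_dd(); BLOCKS and ROOTFSBLOCKS are in
    # 1024-byte blocks, sectors are 512 bytes. Sizes here are hypothetical.
    BLOCKS = 32768            # FAT16 boot partition size, in 1K blocks
    ROOTFSBLOCKS = 262144     # rootfs size, in 1K blocks

    END1 = BLOCKS * 1024                # end of the FAT16 partition, in bytes
    END2 = END1 + 512                   # rootfs partition starts one sector later
    END3 = ROOTFSBLOCKS * 1024 + END1   # end of the rootfs partition, in bytes
    OFFSET = END2 / 512                 # dd seek offset for the rootfs, in sectors
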
diff --git a/meta/classes/bootimg.bbclass b/meta/classes/bootimg.bbclass
index 37a59c2d7..370b3786f 100644
--- a/meta/classes/bootimg.bbclass
+++ b/meta/classes/bootimg.bbclass
@@ -1,112 +1,192 @@
-# bootimg.oeclass
# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
# Released under the MIT license (see packages/COPYING)
-# This creates a bootable image using syslinux, your kernel and an optional
+# Creates a bootable image using syslinux, your kernel and an optional
# initrd
-# External variables needed
+#
+# End result is two things:
+#
+# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
+# an initrd and a rootfs image. These can be written to hard disks directly and
+# also booted from USB flash disks (write them there with dd).
+#
+# 2. A CD .iso image
+
+# The boot process is that the initrd boots and processes which label was
+# selected in syslinux. Actions based on the label are then performed (e.g.
+# installing to an hdd).
+
+# External variables (also used by syslinux.bbclass)
# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
-# ${AUTO_SYSLINUXCFG} - set this to 1 to enable creating an automatic config
-# ${LABELS} - a list of targets for the automatic config
-# ${APPEND} - an override list of append strings for each label
-# ${SYSLINUX_OPTS} - additional options to add to the syslinux file ';' delimited
+# ${NOISO} - skip building the ISO image if set to 1
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-do_bootimg[depends] += "dosfstools-native:do_populate_staging \
- syslinux:do_populate_staging \
- syslinux-installer-native:do_populate_staging \
- mtools-native:do_populate_staging \
- cdrtools-native:do_populate_staging"
+do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
+ mtools-native:do_populate_sysroot \
+ cdrtools-native:do_populate_sysroot"
PACKAGES = " "
+EXCLUDE_FROM_WORLD = "1"
-HDDDIR = "${S}/hdd/boot"
-ISODIR = "${S}/cd/isolinux"
+HDDDIR = "${S}/hddimg"
+ISODIR = "${S}/iso"
-BOOTIMG_VOLUME_ID ?= "oe"
+BOOTIMG_VOLUME_ID ?= "boot"
BOOTIMG_EXTRA_SPACE ?= "512"
-# Get the build_syslinux_cfg() function from the syslinux class
+EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
+EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "grub-efi", "dummy", d)}"
+
+# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
+# contain "efi". This way legacy is supported by default if neither is
+# specified, maintaining the original behavior.
+def pcbios(d):
+ pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
+ if pcbios == "0":
+ pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
+ return pcbios
+
+def pcbios_class(d):
+ if d.getVar("PCBIOS", True) == "1":
+ return "syslinux"
+ return "dummy"
-SYSLINUXCFG = "${HDDDIR}/syslinux.cfg"
-SYSLINUXMENU = "${HDDDIR}/menu"
+PCBIOS = "${@pcbios(d)}"
+PCBIOS_CLASS = "${@pcbios_class(d)}"
-inherit syslinux
-
-build_boot_bin() {
- install -d ${HDDDIR}
- install -m 0644 ${STAGING_DIR}/${MACHINE}${HOST_VENDOR}-${HOST_OS}/kernel/bzImage \
- ${HDDDIR}/vmlinuz
+inherit ${PCBIOS_CLASS}
+inherit ${EFI_CLASS}
- if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
- install -m 0644 ${INITRD} ${HDDDIR}/initrd
+populate() {
+ DEST=$1
+ install -d ${DEST}
+
+ # Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
+ install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage ${DEST}/vmlinuz
+
+ if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
+ install -m 0644 ${INITRD} ${DEST}/initrd
fi
- if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
- install -m 0644 ${ROOTFS} ${HDDDIR}/rootfs.img
+ if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
+ install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
fi
- install -m 444 ${STAGING_DATADIR}/syslinux/ldlinux.sys \
- ${HDDDIR}/ldlinux.sys
+}
+
+build_iso() {
+ # Only create an ISO if we have an INITRD and NOISO was not set
+ if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+ bbnote "ISO image will not be created."
+ return
+ fi
- # Do a little math, bash style
- #BLOCKS=`du -s ${HDDDIR} | cut -f 1`
- BLOCKS=`du -bks ${HDDDIR} | cut -f 1`
- SIZE=`expr $BLOCKS + ${BOOTIMG_EXTRA_SPACE}`
+ populate ${ISODIR}
- mkdosfs -n ${BOOTIMG_VOLUME_ID} -d ${HDDDIR} \
- -C ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg $SIZE
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_iso_populate
+ fi
+ if [ "${EFI}" = "1" ]; then
+ grubefi_iso_populate
+ fi
- syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
- chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ if [ "${PCBIOS}" = "1" ]; then
+ mkisofs -V ${BOOTIMG_VOLUME_ID} \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} -r \
+ ${MKISOFS_OPTIONS} ${ISODIR}
+ else
+ bbnote "EFI-only ISO images are untested, please provide feedback."
+ mkisofs -V ${BOOTIMG_VOLUME_ID} \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+ -r ${ISODIR}
+ fi
+
+ isohybrid ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
cd ${DEPLOY_DIR_IMAGE}
- rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
- ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
-
- #Create an ISO if we have an INITRD
- if [ -n "${INITRD}" ] && [ -s "${INITRD}" ] && [ "${NOISO}" != "1" ] ; then
- install -d ${ISODIR}
+ rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
+ ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
+}
- # Install the kernel
+build_hddimg() {
+ # Create an HDD image
+ if [ "${NOHDD}" != "1" ] ; then
+ populate ${HDDDIR}
- install -m 0644 ${STAGING_DIR}/${MACHINE}${HOST_VENDOR}-${HOST_OS}/kernel/bzImage \
- ${ISODIR}/vmlinuz
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hddimg_populate
+ fi
+ if [ "${EFI}" = "1" ]; then
+ grubefi_hddimg_populate
+ fi
- # Install the configuration files
+ # Calculate the size required for the final image including the
+ # data and filesystem overhead.
+ # Sectors: 512 bytes
+ # Blocks: 1024 bytes
- cp ${HDDDIR}/syslinux.cfg ${ISODIR}/isolinux.cfg
+ # Determine the sector count just for the data
+ SECTORS=$(expr $(du --apparent-size -ks ${HDDDIR} | cut -f 1) \* 2)
- if [ -f ${SYSLINUXMENU} ]; then
- cp ${SYSLINUXMENU} ${ISODIR}
- fi
+ # Account for the filesystem overhead. This includes directory
+ # entries in the clusters as well as the FAT itself.
+ # Assumptions:
+ # FAT32 (12 or 16 may be selected by mkdosfs, but the extra
+ # padding will be minimal on those smaller images and not
+    #    worth the logic here to calculate the smaller FAT sizes)
+ # < 16 entries per directory
+ # 8.3 filenames only
- install -m 0644 ${INITRD} ${ISODIR}/initrd
+ # 32 bytes per dir entry
+ DIR_BYTES=$(expr $(find ${HDDDIR} | tail -n +2 | wc -l) \* 32)
+ # 32 bytes for every end-of-directory dir entry
+ DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 32))
+ # 4 bytes per FAT entry per sector of data
+ FAT_BYTES=$(expr $SECTORS \* 4)
+ # 4 bytes per FAT entry per end-of-cluster list
+ FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 4))
- if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
- install -m 0644 ${ROOTFS} ${ISODIR}/rootfs.img
- fi
+ # Use a ceiling function to determine FS overhead in sectors
+ DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
+ # There are two FATs on the image
+ FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
+ SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
- # And install the syslinux stuff
- cp ${STAGING_DATADIR}/syslinux/isolinux.bin \
- ${ISODIR}
+ # Determine the final size in blocks accounting for some padding
+ BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
- mkisofs -V ${BOOTIMG_VOLUME_ID} \
- -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
- -b isolinux/isolinux.bin -c isolinux/boot.cat -r \
- -no-emul-boot -boot-load-size 4 -boot-info-table \
- ${S}/cd/
+ # Ensure total sectors is an integral number of sectors per
+ # track or mcopy will complain. Sectors are 512 bytes, and we
+ # generate images with 32 sectors per track. This calculation is
+ # done in blocks, thus the mod by 16 instead of 32.
+ BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
- cd ${DEPLOY_DIR_IMAGE}
- rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
- ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
+ IMG=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+ mkdosfs -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${IMG} ${BLOCKS}
+ # Copy HDDDIR recursively into the image file directly
+ mcopy -i ${IMG} -s ${HDDDIR}/* ::/
+
+ if [ "${PCBIOS}" = "1" ]; then
+ syslinux_hddimg_install
+ fi
+ chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+
+ cd ${DEPLOY_DIR_IMAGE}
+ rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
+ ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
fi
-}
+}
python do_bootimg() {
- bb.build.exec_func('build_syslinux_cfg', d)
- bb.build.exec_func('build_boot_bin', d)
+ if d.getVar("PCBIOS", True) == "1":
+ bb.build.exec_func('build_syslinux_cfg', d)
+ if d.getVar("EFI", True) == "1":
+ bb.build.exec_func('build_grub_cfg', d)
+ bb.build.exec_func('build_hddimg', d)
+ bb.build.exec_func('build_iso', d)
}
addtask bootimg before do_build
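
The FAT overhead estimate in build_hddimg() is dense in shell; restated with a hypothetical payload (integer division throughout, as with expr; the +511 terms turn floor division into ceiling division):

    # FAT32 overhead estimate from build_hddimg(), with hypothetical counts.
    data_sectors = 20000      # 512-byte sectors of payload under HDDDIR
    entries, dirs = 56, 6     # find results: all entries, directories only

    dir_bytes = entries * 32 + dirs * 32          # dir entries + end-of-dir entries
    fat_bytes = data_sectors * 4 + dirs * 4       # 4 bytes per FAT entry
    dir_sectors = (dir_bytes + 511) / 512         # ceiling to whole sectors
    fat_sectors = ((fat_bytes + 511) / 512) * 2   # two FAT copies on the image
    sectors = data_sectors + dir_sectors + fat_sectors
    blocks = sectors / 2 + 512                    # 1K blocks plus BOOTIMG_EXTRA_SPACE
    blocks = blocks + (16 - blocks % 16)          # pad to whole 32-sector tracks
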
diff --git a/meta/classes/bugzilla.bbclass b/meta/classes/bugzilla.bbclass
new file mode 100644
index 000000000..4028d261c
--- /dev/null
+++ b/meta/classes/bugzilla.bbclass
@@ -0,0 +1,186 @@
+#
+# Small event handler to automatically open URLs and file
+# bug reports at a bugzilla of your choice.
+# It uses the XML-RPC interface, so you must have it enabled.
+#
+# Before using it you must define BUGZILLA_USER and BUGZILLA_PASS credentials,
+# BUGZILLA_XMLRPC - uri of xmlrpc.cgi,
+# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs
+# BUGZILLA_VERSION - version against which to report new bugs
+#
+
+def bugzilla_find_bug_report(debug_file, server, args, bugname):
+ args['summary'] = bugname
+ bugs = server.Bug.search(args)
+ if len(bugs['bugs']) == 0:
+ print >> debug_file, "Bugs not found"
+ return (False,None)
+ else: # silently pick the first result
+ print >> debug_file, "Result of bug search is "
+ print >> debug_file, bugs
+ status = bugs['bugs'][0]['status']
+ id = bugs['bugs'][0]['id']
+        return (status not in ["CLOSED", "RESOLVED", "VERIFIED"], id)
+
+def bugzilla_file_bug(debug_file, server, args, name, text, version):
+ args['summary'] = name
+ args['comment'] = text
+ args['version'] = version
+ args['op_sys'] = 'Linux'
+ args['platform'] = 'Other'
+ args['severity'] = 'normal'
+ args['priority'] = 'Normal'
+ try:
+ return server.Bug.create(args)['id']
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return None
+
+def bugzilla_reopen_bug(debug_file, server, args, bug_number):
+ args['ids'] = [bug_number]
+ args['status'] = "CONFIRMED"
+ try:
+ server.Bug.update(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
+def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
+ args['ids'] = [bug_number]
+ args['file_name'] = file_name
+ args['summary'] = logdescription
+ args['content_type'] = "text/plain"
+ args['data'] = log
+ args['comment'] = text
+ try:
+ server.Bug.add_attachment(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
+def bugzilla_add_comment(debug_file, server, args, bug_number, text):
+ args['id'] = bug_number
+ args['comment'] = text
+ try:
+ server.Bug.add_comment(args)
+ return True
+ except Exception, e:
+ print >> debug_file, repr(e)
+ return False
+
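
For reference, the helpers above can be exercised by hand roughly like this (server URL and credentials are hypothetical; the XML-RPC interface must be enabled on the server):

    # Hypothetical example of driving the helpers above by hand.
    import sys
    import xmlrpclib

    server = xmlrpclib.ServerProxy("https://bugs.example.org/xmlrpc.cgi")
    args = {'Bugzilla_login': 'builder@example.org',
            'Bugzilla_password': 'secret',
            'product': 'MyProduct',
            'component': 'build'}

    # Returns (is_still_open, bug_number) for the named autobuild bug, or
    # (False, None) when no matching report exists yet.
    (bug_open, bug_number) = bugzilla_find_bug_report(sys.stderr, server,
                                                      args.copy(),
                                                      "mypkg-1.0-r0-autobuild")
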
+addhandler bugzilla_eventhandler
+python bugzilla_eventhandler() {
+ import bb, os, glob
+    import xmlrpclib, httplib, base64
+
+ class ProxiedTransport(xmlrpclib.Transport):
+ def __init__(self, proxy, use_datetime = 0):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+ self.proxy = proxy
+ self.user = None
+ self.password = None
+
+ def set_user(self, user):
+ self.user = user
+
+ def set_password(self, password):
+ self.password = password
+
+ def make_connection(self, host):
+ self.realhost = host
+ return httplib.HTTP(self.proxy)
+
+ def send_request(self, connection, handler, request_body):
+ connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
+ if self.user != None:
+ if self.password != None:
+ auth = "%s:%s" % (self.user, self.password)
+ else:
+ auth = self.user
+ connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
+
+ event = e
+ data = e.data
+ name = bb.event.getName(event)
+ if name == "MsgNote":
+ # avoid recursion
+ return
+
+ if name == "TaskFailed":
+ xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
+ user = data.getVar("BUGZILLA_USER", True)
+ passw = data.getVar("BUGZILLA_PASS", True)
+ product = data.getVar("BUGZILLA_PRODUCT", True)
+ compon = data.getVar("BUGZILLA_COMPONENT", True)
+ version = data.getVar("BUGZILLA_VERSION", True)
+
+ proxy = data.getVar('http_proxy', True )
+ if (proxy):
+ import urllib2
+ s, u, p, hostport = urllib2._parse_proxy(proxy)
+ transport = ProxiedTransport(hostport)
+ else:
+ transport = None
+
+ server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
+ args = {
+ 'Bugzilla_login': user,
+ 'Bugzilla_password': passw,
+ 'product': product,
+ 'component': compon}
+
+ # evil hack to figure out what is going on
+ debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
+
+ file = None
+ bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
+ "pv" : data.getVar("PV", True),
+ }
+ log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
+ text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
+ if len(log_file) != 0:
+ print >> debug_file, "Adding log file %s" % log_file[0]
+ file = open(log_file[0], 'r')
+ log = file.read()
+            file.close()
+ else:
+ print >> debug_file, "No log file found for the glob"
+ log = None
+
+ (bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
+ print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
+
+ # The bug is present and still open, attach an error log
+ if not bug_number:
+ bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
+ if not bug_number:
+                print >> debug_file, "Couldn't acquire a new bug_number, filing a bug report failed"
+ else:
+ print >> debug_file, "The new bug_number: '%s'" % bug_number
+ elif not bug_open:
+ if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
+ print >> debug_file, "Failed to reopen the bug #%s" % bug_number
+ else:
+ print >> debug_file, "Reopened the bug #%s" % bug_number
+
+ if bug_number and log:
+ print >> debug_file, "The bug is known as '%s'" % bug_number
+ desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
+ if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
+ print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
+ else:
+ print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
+ else:
+ print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
+            if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
+                print >> debug_file, "Failed to add a comment with the build log for bug #%s" % bug_number
+            else:
+                print >> debug_file, "Added a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
+
+ # store bug number for oestats-client
+ if bug_number:
+ data.setVar('OESTATS_BUG_NUMBER', bug_number)
+}
+
diff --git a/meta/classes/buildhistory.bbclass b/meta/classes/buildhistory.bbclass
new file mode 100644
index 000000000..d2d19ff9c
--- /dev/null
+++ b/meta/classes/buildhistory.bbclass
@@ -0,0 +1,426 @@
+#
+# Records history of build output in order to detect regressions
+#
+# Based in part on testlab.bbclass and packagehistory.bbclass
+#
+# Copyright (C) 2011 Intel Corporation
+# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
+#
+
+BUILDHISTORY_FEATURES ?= "image package"
+BUILDHISTORY_DIR ?= "${TMPDIR}/buildhistory"
+BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
+BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
+BUILDHISTORY_COMMIT ?= "0"
+BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
+BUILDHISTORY_PUSH_REPO ?= ""
+
+# Must inherit package first before changing PACKAGEFUNCS
+inherit package
+PACKAGEFUNCS += "buildhistory_emit_pkghistory"
+
+# We don't want to force a rerun of do_package for everything
+# if the buildhistory_emit_pkghistory function or any of the
+# variables it refers to changes
+do_package[vardepsexclude] += "buildhistory_emit_pkghistory"
+
+#
+# Called during do_package to write out metadata about this package
+# for comparison when writing future packages
+#
+python buildhistory_emit_pkghistory() {
+ import re
+
+    if "package" not in (d.getVar('BUILDHISTORY_FEATURES', True) or "").split():
+ return 0
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ class RecipeInfo:
+ def __init__(self, name):
+ self.name = name
+ self.pe = "0"
+ self.pv = "0"
+ self.pr = "r0"
+ self.depends = ""
+ self.packages = ""
+
+ class PackageInfo:
+ def __init__(self, name):
+ self.name = name
+ self.pe = "0"
+ self.pv = "0"
+ self.pr = "r0"
+ self.size = 0
+ self.depends = ""
+ self.rdepends = ""
+ self.rrecommends = ""
+ self.files = ""
+ self.filelist = ""
+
+ # Should check PACKAGES here to see if anything removed
+
+ def getpkgvar(pkg, var):
+ val = bb.data.getVar('%s_%s' % (var, pkg), d, 1)
+ if val:
+ return val
+ val = bb.data.getVar('%s' % (var), d, 1)
+
+ return val
+
+ def readRecipeInfo(pn, histfile):
+ rcpinfo = RecipeInfo(pn)
+ f = open(histfile, "r")
+ try:
+ for line in f:
+ lns = line.split('=')
+ name = lns[0].strip()
+ value = lns[1].strip(" \t\r\n").strip('"')
+ if name == "PE":
+ rcpinfo.pe = value
+ elif name == "PV":
+ rcpinfo.pv = value
+ elif name == "PR":
+ rcpinfo.pr = value
+ elif name == "DEPENDS":
+ rcpinfo.depends = value
+ elif name == "PACKAGES":
+ rcpinfo.packages = value
+ finally:
+ f.close()
+ return rcpinfo
+
+ def readPackageInfo(pkg, histfile):
+ pkginfo = PackageInfo(pkg)
+ f = open(histfile, "r")
+ try:
+ for line in f:
+ lns = line.split('=')
+ name = lns[0].strip()
+ value = lns[1].strip(" \t\r\n").strip('"')
+ if name == "PE":
+ pkginfo.pe = value
+ elif name == "PV":
+ pkginfo.pv = value
+ elif name == "PR":
+ pkginfo.pr = value
+ elif name == "RDEPENDS":
+ pkginfo.rdepends = value
+ elif name == "RRECOMMENDS":
+ pkginfo.rrecommends = value
+ elif name == "PKGSIZE":
+ pkginfo.size = long(value)
+ elif name == "FILES":
+ pkginfo.files = value
+ elif name == "FILELIST":
+ pkginfo.filelist = value
+ finally:
+ f.close()
+ return pkginfo
+
+ def getlastrecipeversion(pn):
+ try:
+ histfile = os.path.join(pkghistdir, "latest")
+ return readRecipeInfo(pn, histfile)
+ except EnvironmentError:
+ return None
+
+ def getlastpkgversion(pkg):
+ try:
+ histfile = os.path.join(pkghistdir, pkg, "latest")
+ return readPackageInfo(pkg, histfile)
+ except EnvironmentError:
+ return None
+
+ def sortpkglist(string):
+ pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
+ pkglist = [p.group(0) for p in pkgiter]
+ pkglist.sort()
+ return ' '.join(pkglist)
+
+ def sortlist(string):
+ items = string.split(' ')
+ items.sort()
+ return ' '.join(items)
+
+ pn = d.getVar('PN', True)
+ pe = d.getVar('PE', True) or "0"
+ pv = d.getVar('PV', True)
+ pr = d.getVar('PR', True)
+ packages = squashspaces(d.getVar('PACKAGES', True))
+
+ rcpinfo = RecipeInfo(pn)
+ rcpinfo.pe = pe
+ rcpinfo.pv = pv
+ rcpinfo.pr = pr
+ rcpinfo.depends = sortlist(squashspaces(d.getVar('DEPENDS', True) or ""))
+ rcpinfo.packages = packages
+ write_recipehistory(rcpinfo, d)
+ write_latestlink(None, pe, pv, pr, d)
+
+ # Apparently the version can be different on a per-package basis (see Python)
+ pkgdest = d.getVar('PKGDEST', True)
+ for pkg in packages.split():
+ pe = getpkgvar(pkg, 'PE') or "0"
+ pv = getpkgvar(pkg, 'PV')
+ pr = getpkgvar(pkg, 'PR')
+ #
+ # Find out what the last version was
+ # Make sure the version did not decrease
+ #
+ lastversion = getlastpkgversion(pkg)
+ if lastversion:
+ last_pe = lastversion.pe
+ last_pv = lastversion.pv
+ last_pr = lastversion.pr
+ r = bb.utils.vercmp((pe, pv, pr), (last_pe, last_pv, last_pr))
+ if r < 0:
+ bb.error("Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pe, last_pv, last_pr, pe, pv, pr))
+
+ pkginfo = PackageInfo(pkg)
+ pkginfo.pe = pe
+ pkginfo.pv = pv
+ pkginfo.pr = pr
+ pkginfo.rdepends = sortpkglist(squashspaces(getpkgvar(pkg, 'RDEPENDS') or ""))
+ pkginfo.rrecommends = sortpkglist(squashspaces(getpkgvar(pkg, 'RRECOMMENDS') or ""))
+ pkginfo.files = squashspaces(getpkgvar(pkg, 'FILES') or "")
+
+ # Gather information about packaged files
+ pkgdestpkg = os.path.join(pkgdest, pkg)
+ filelist = []
+ pkginfo.size = 0
+ for root, dirs, files in os.walk(pkgdestpkg):
+ relpth = os.path.relpath(root, pkgdestpkg)
+ for f in files:
+ fstat = os.lstat(os.path.join(root, f))
+ pkginfo.size += fstat.st_size
+ filelist.append(os.sep + os.path.join(relpth, f))
+ filelist.sort()
+ pkginfo.filelist = " ".join(filelist)
+
+ write_pkghistory(pkginfo, d)
+
+ write_latestlink(pkg, pe, pv, pr, d)
+}
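
For illustration, a standalone sketch of what the nested sortpkglist() helper above does to a runtime-dependency string; it uses the same regex, so sorting never detaches a " (>= 1.0)"-style version constraint from its package name (example input invented):

    import re

    def sortpkglist(string):
        # Each match is a package name plus an optional version constraint
        pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string)
        return ' '.join(sorted(p.group(0) for p in pkgiter))

    print(sortpkglist("libz1 (>= 1.2.5) libc6 (>= 2.13) busybox"))
    # -> busybox libc6 (>= 2.13) libz1 (>= 1.2.5)
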
+
+
+def write_recipehistory(rcpinfo, d):
+ bb.debug(2, "Writing recipe history")
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ if not os.path.exists(pkghistdir):
+ os.makedirs(pkghistdir)
+
+ verfile = os.path.join(pkghistdir, "%s:%s-%s" % (rcpinfo.pe, rcpinfo.pv, rcpinfo.pr))
+ f = open(verfile, "w")
+ try:
+ if rcpinfo.pe != "0":
+ f.write("PE = %s\n" % rcpinfo.pe)
+ f.write("PV = %s\n" % rcpinfo.pv)
+ f.write("PR = %s\n" % rcpinfo.pr)
+ f.write("DEPENDS = %s\n" % rcpinfo.depends)
+ f.write("PACKAGES = %s\n" % rcpinfo.packages)
+ finally:
+ f.close()
+
+
+def write_pkghistory(pkginfo, d):
+ bb.debug(2, "Writing package history")
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ verpath = os.path.join(pkghistdir, pkginfo.name)
+ if not os.path.exists(verpath):
+ os.makedirs(verpath)
+
+ verfile = os.path.join(verpath, "%s:%s-%s" % (pkginfo.pe, pkginfo.pv, pkginfo.pr))
+ f = open(verfile, "w")
+ try:
+ if pkginfo.pe != "0":
+ f.write("PE = %s\n" % pkginfo.pe)
+ f.write("PV = %s\n" % pkginfo.pv)
+ f.write("PR = %s\n" % pkginfo.pr)
+ f.write("RDEPENDS = %s\n" % pkginfo.rdepends)
+ f.write("RRECOMMENDS = %s\n" % pkginfo.rrecommends)
+ f.write("PKGSIZE = %d\n" % pkginfo.size)
+ f.write("FILES = %s\n" % pkginfo.files)
+ f.write("FILELIST = %s\n" % pkginfo.filelist)
+ finally:
+ f.close()
+
+
+def write_latestlink(pkg, pe, pv, pr, d):
+ import shutil
+
+ pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+
+ def rm_link(path):
+ try:
+ os.unlink(path)
+ except OSError:
+ return
+
+ if pkg:
+ filedir = os.path.join(pkghistdir, pkg)
+ else:
+ filedir = pkghistdir
+ latest_file = os.path.join(filedir, "latest")
+ ver_file = os.path.join(filedir, "%s:%s-%s" % (pe, pv, pr))
+ rm_link(latest_file)
+ if d.getVar('BUILDHISTORY_KEEP_VERSIONS', True) == '1':
+ shutil.copy(ver_file, latest_file)
+ else:
+ shutil.move(ver_file, latest_file)
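
The backwards-version guard in buildhistory_emit_pkghistory() above relies on bb.utils.vercmp() comparing (epoch, version, revision) tuples. A simplified standalone sketch of the same check; real version comparison is considerably more involved than this naive numeric split:

    def naive_vercmp(a, b):
        # a and b are (pe, pv, pr) tuples, e.g. ("0", "1.2.5", "r3")
        def key(t):
            pe, pv, pr = t
            return (int(pe), [int(x) for x in pv.split('.') if x.isdigit()], pr)
        ka, kb = key(a), key(b)
        return (ka > kb) - (ka < kb)

    last, new = ("0", "1.2.5", "r3"), ("0", "1.2.4", "r0")
    if naive_vercmp(new, last) < 0:
        print("version went backwards: %s:%s-%s -> %s:%s-%s" % (last + new))
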
+
+
+buildhistory_get_image_installed() {
+ # Anything requiring the use of the packaging system should be done in here
+ # in case the packaging files are going to be removed for this image
+
+ if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ mkdir -p ${BUILDHISTORY_DIR_IMAGE}
+
+ # Get list of installed packages
+ list_installed_packages | sort > ${BUILDHISTORY_DIR_IMAGE}/installed-package-names.txt
+ INSTALLED_PKGS=`cat ${BUILDHISTORY_DIR_IMAGE}/installed-package-names.txt`
+
+ # Produce installed package file and size lists and dependency graph
+ echo -n > ${BUILDHISTORY_DIR_IMAGE}/installed-packages.txt
+ echo -n > ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
+ echo -e "digraph depends {\n node [shape=plaintext]" > ${BUILDHISTORY_DIR_IMAGE}/depends.dot
+ for pkg in $INSTALLED_PKGS; do
+ pkgfile=`get_package_filename $pkg`
+ echo `basename $pkgfile` >> ${BUILDHISTORY_DIR_IMAGE}/installed-packages.txt
+ if [ -f $pkgfile ] ; then
+ pkgsize=`du -k $pkgfile | head -n1 | awk '{ print $1 }'`
+ echo $pkgsize $pkg >> ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
+ fi
+
+ deps=`list_package_depends $pkg`
+ for dep in $deps ; do
+ echo "$pkg OPP $dep;" | sed -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' | sed 's:OPP:->:g'
+ done
+
+ recs=`list_package_recommends $pkg`
+ for rec in $recs ; do
+ echo "$pkg OPP $rec [style=dotted];" | sed -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' | sed 's:OPP:->:g'
+ done
+ done | sort | uniq >> ${BUILDHISTORY_DIR_IMAGE}/depends.dot
+ echo "}" >> ${BUILDHISTORY_DIR_IMAGE}/depends.dot
+
+ cat ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.txt
+ rm ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
+
+ # Produce some cut-down graphs (for readability)
+ grep -v kernel_image ${BUILDHISTORY_DIR_IMAGE}/depends.dot | grep -v kernel_2 | grep -v kernel_3 > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel.dot
+ grep -v libc6 ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel.dot | grep -v libgcc > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc.dot
+ grep -v update_ ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc.dot > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate.dot
+ grep -v kernel_module ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate.dot > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate-nomodules.dot
+
+ # Workaround for broken shell function dependencies
+ if false ; then
+ get_package_filename
+ list_package_depends
+ list_package_recommends
+ fi
+}
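
The OPP placeholder in the loops above exists because the first sed pass rewrites '-' and '.' to '_' and strips '+', which would mangle a literal '->' arrow; only afterwards is OPP turned into the arrow. The same node-name sanitization as a standalone Python sketch:

    def dot_node(name):
        # Mirror the sed expressions: '-' and '.' become '_', '+' is dropped
        return name.replace('-', '_').replace('.', '_').replace('+', '')

    print('%s -> %s;' % (dot_node('task-core-boot'), dot_node('libstdc++6')))
    # -> task_core_boot -> libstdc6;
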
+
+buildhistory_get_imageinfo() {
+ if [ "${@base_contains('BUILDHISTORY_FEATURES', 'image', '1', '0', d)}" = "0" ] ; then
+ return
+ fi
+
+ # List the files in the image, but exclude date/time etc.
+ # This awk script is somewhat messy, but handles the case where the size is not printed for device files under pseudo
+ ( cd ${IMAGE_ROOTFS} && find . -ls | awk '{ if ( $7 ~ /[0-9]/ ) printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, $7, $11, $12, $13 ; else printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, 0, $10, $11, $12 }' | sort -k5 > ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt )
+
+ # Record some machine-readable meta-information about the image
+ echo -n > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
+ cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
+${@buildhistory_get_imagevars(d)}
+END
+ imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
+ echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
+
+ # Add some configuration information
+ echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
+
+ cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
+${@buildhistory_get_layers(d)}
+END
+}
+
+# By prepending we get in before the removal of packaging files
+ROOTFS_POSTPROCESS_COMMAND =+ "buildhistory_get_image_installed ; "
+
+IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
+
+def buildhistory_get_layers(d):
+ layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
+ return layertext
+
+
+def squashspaces(string):
+ import re
+ return re.sub("\s+", " ", string).strip()
+
+
+def buildhistory_get_imagevars(d):
+ imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
+ listvars = "USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS"
+
+ imagevars = imagevars.split()
+ listvars = listvars.split()
+ ret = ""
+ for var in imagevars:
+ value = d.getVar(var, True) or ""
+ if var in listvars:
+ # Squash out spaces
+ value = squashspaces(value)
+ ret += "%s = %s\n" % (var, value)
+ return ret.rstrip('\n')
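
A standalone sketch of the same emission logic, with a plain dict standing in for the BitBake datastore (keys and values invented):

    import re

    def squashspaces(s):
        return re.sub(r"\s+", " ", s).strip()

    def get_imagevars(data, imagevars, listvars):
        lines = []
        for var in imagevars:
            value = data.get(var) or ""
            if var in listvars:
                # Squash whitespace so line-wrapping changes don't show as diffs
                value = squashspaces(value)
            lines.append("%s = %s" % (var, value))
        return "\n".join(lines)

    print(get_imagevars({"DISTRO": "poky", "IMAGE_FEATURES": " x11-base\n ssh-server-dropbear "},
                        ["DISTRO", "IMAGE_FEATURES"], ["IMAGE_FEATURES"]))
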
+
+
+buildhistory_commit() {
+ if [ ! -d ${BUILDHISTORY_DIR} ] ; then
+ # The code above that creates this dir never ran, so there can't be anything to commit
+ return
+ fi
+
+ ( cd ${BUILDHISTORY_DIR}/
+ # Initialise the repo if necessary
+ if [ ! -d .git ] ; then
+ git init -q
+ fi
+ # Ensure there are new/changed files to commit
+ repostatus=`git status --porcelain`
+ HOSTNAME=`hostname 2>/dev/null || echo unknown`
+ if [ "$repostatus" != "" ] ; then
+ git add ${BUILDHISTORY_DIR}/*
+ # porcelain output looks like "?? packages/foo/bar"
+ for entry in `echo "$repostatus" | awk '{print $2}' | awk -F/ '{print $1}' | sort | uniq` ; do
+ git commit ${BUILDHISTORY_DIR}/$entry -m "$entry: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
+ done
+ if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
+ git push -q ${BUILDHISTORY_PUSH_REPO}
+ fi
+ else
+ git commit ${BUILDHISTORY_DIR}/ --allow-empty -m "No changes: Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
+ fi) || true
+}
+
+python buildhistory_eventhandler() {
+ import bb.build
+ import bb.event
+
+ if isinstance(e, bb.event.BuildCompleted):
+ if (e.data.getVar('BUILDHISTORY_FEATURES', True) or "").strip():
+ if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
+ bb.build.exec_func("buildhistory_commit", e.data)
+}
+
+addhandler buildhistory_eventhandler
diff --git a/meta/classes/buildstats.bbclass b/meta/classes/buildstats.bbclass
new file mode 100644
index 000000000..dc9afb101
--- /dev/null
+++ b/meta/classes/buildstats.bbclass
@@ -0,0 +1,281 @@
+BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
+BNFILE = "${BUILDSTATS_BASE}/.buildname"
+DEVFILE = "${BUILDSTATS_BASE}/.device"
+
+################################################################################
+# Build statistics gathering.
+#
+# The CPU and Time gathering/tracking functions and bbevent inspiration
+# were written by Christopher Larson and can be seen here:
+# http://kergoth.pastey.net/142813
+#
+################################################################################
+
+def get_process_cputime(pid):
+ fields = open("/proc/%d/stat" % pid, "r").readline().rstrip().split()
+ # 13: utime, 14: stime, 15: cutime, 16: cstime
+ return sum(int(field) for field in fields[13:17])
+
+def get_cputime():
+ fields = open("/proc/stat", "r").readline().rstrip().split()[1:]
+ return sum(int(field) for field in fields)
+
+def set_bn(e):
+ bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
+ try:
+ os.remove(e.data.getVar('BNFILE', True))
+ except:
+ pass
+ file = open(e.data.getVar('BNFILE', True), "w")
+ file.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
+ file.close()
+
+def get_bn(e):
+ file = open(e.data.getVar('BNFILE', True))
+ bn = file.readline()
+ file.close()
+ return bn
+
+def set_device(e):
+ tmpdir = e.data.getVar('TMPDIR', True)
+ try:
+ os.remove(e.data.getVar('DEVFILE', True))
+ except:
+ pass
+ ############################################################################
+ # We look for the volume TMPDIR lives on. Doing all disks would make little
+ # sense and not give us any particularly useful data. In theory we could do
+ # something like stick DL_DIR on a different partition and this would
+ # throw stats gathering off. The same goes for SSTATE_DIR. However, let's
+ # get the basics in here and work on the corner cases later.
+ # Note: /proc/diskstats does not contain info on ecryptfs, tmpfs, etc.
+ # If we end up hitting one of these filesystems, we'll just skip diskstats collection.
+ ############################################################################
+ device=os.stat(tmpdir)
+ majordev=os.major(device.st_dev)
+ minordev=os.minor(device.st_dev)
+ ############################################################################
+ # Bug 1700:
+ # Because tmpfs/ecryptfs/ramfs etc. insert no entry in /proc/diskstats,
+ # we set rdev to NoLogicalDevice and check for it later. If we find NLD
+ # we do not collect diskstats, as collecting meaningful statistics
+ # for these fs types requires a bit more research.
+ ############################################################################
+ rdev="NoLogicalDevice"
+ try:
+ for line in open("/proc/diskstats", "r"):
+ if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
+ rdev=line.split()[2]
+ except:
+ pass
+ file = open(e.data.getVar('DEVFILE', True), "w")
+ file.write(rdev)
+ file.close()
+
+def get_device(e):
+ file = open(e.data.getVar('DEVFILE', True))
+ device = file.readline()
+ file.close()
+ return device
+
+def get_diskstats(dev):
+ import itertools
+ ############################################################################
+ # For info on what these are, see kernel doc file iostats.txt
+ ############################################################################
+ DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
+ diskstats_val = None
+ try:
+ for x in open("/proc/diskstats", "r"):
+ if dev in x:
+ diskstats_val = x.rstrip().split()[4:]
+ except IOError:
+ return
+ if not diskstats_val:
+ return
+ diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
+ return diskstats
+
+def set_diskdata(var, dev, data):
+ data.setVar(var, get_diskstats(dev))
+
+def get_diskdata(var, dev, data):
+ olddiskdata = data.getVar(var, False)
+ diskdata = {}
+ if olddiskdata is None:
+ return
+ newdiskdata = get_diskstats(dev)
+ for key in olddiskdata.iterkeys():
+ diskdata["Start"+key] = str(int(olddiskdata[key]))
+ diskdata["End"+key] = str(int(newdiskdata[key]))
+ return diskdata
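
For orientation, how the Start*/End* pairs produced by get_diskdata() line up once a task finishes (counter values invented):

    old = {'SectRead': '1000', 'SectWrite': '400'}   # sampled at task start
    new = {'SectRead': '1600', 'SectWrite': '900'}   # sampled at task end
    diskdata = {}
    for key in old:
        diskdata["Start" + key] = str(int(old[key]))
        diskdata["End" + key] = str(int(new[key]))
    # A consumer can then derive e.g. sectors read during the task:
    print(int(diskdata["EndSectRead"]) - int(diskdata["StartSectRead"]))  # 600
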
+
+def set_timedata(var, data):
+ import time
+ time = time.time()
+ cputime = get_cputime()
+ proctime = get_process_cputime(os.getpid())
+ data.setVar(var, (time, cputime, proctime))
+
+def get_timedata(var, data):
+ import time
+ timedata = data.getVar(var, False)
+ if timedata is None:
+ return
+ oldtime, oldcpu, oldproc = timedata
+ procdiff = get_process_cputime(os.getpid()) - oldproc
+ cpudiff = get_cputime() - oldcpu
+ timediff = time.time() - oldtime
+ if cpudiff > 0:
+ cpuperc = float(procdiff) * 100 / cpudiff
+ else:
+ cpuperc = None
+ return timediff, cpuperc
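
The CPU figure these helpers produce is the process's CPU time as a share of total system CPU time over the same window. A minimal Linux-only sketch of that sampling pattern, standalone and reading /proc like the functions above (it assumes the process name in /proc/pid/stat contains no spaces):

    import os, time

    def process_cputime(pid):
        fields = open("/proc/%d/stat" % pid).readline().split()
        return sum(int(f) for f in fields[13:17])  # utime+stime+cutime+cstime

    def total_cputime():
        return sum(int(f) for f in open("/proc/stat").readline().split()[1:])

    t0, c0, p0 = time.time(), total_cputime(), process_cputime(os.getpid())
    sum(i * i for i in range(10 ** 6))  # some work to measure
    cpudiff = total_cputime() - c0
    if cpudiff > 0:
        print("%.1f%% CPU over %.2fs" % (
            100.0 * (process_cputime(os.getpid()) - p0) / cpudiff,
            time.time() - t0))
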
+
+def write_task_data(status, logfile, dev, e):
+ import time
+ bn = get_bn(e)
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+ file = open(os.path.join(logfile), "a")
+ timedata = get_timedata("__timedata_task", e.data)
+ if timedata:
+ elapsedtime, cpu = timedata
+ file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
+ (e.task, elapsedtime), e.data))
+ if cpu:
+ file.write("CPU usage: %0.1f%% \n" % cpu)
+ ############################################################################
+ # Here we gather up disk data. In an effort to avoid lying with stats
+ # we do only a bare minimum of analysis of the collected data.
+ # The simple fact is, doing disk I/O collection on a per-process basis
+ # without affecting build time would be difficult.
+ # For the best information, running things with BB_TOTAL_THREADS = "1"
+ # would return accurate per-task results.
+ ############################################################################
+ if dev != "NoLogicalDevice":
+ diskdata = get_diskdata("__diskdata_task", dev, e.data)
+ if diskdata:
+ for key in sorted(diskdata.iterkeys()):
+ file.write(key + ": " + diskdata[key] + "\n")
+ if status == "passed":
+ file.write("Status: PASSED \n")
+ else:
+ file.write("Status: FAILED \n")
+ file.write("Ended: %0.2f \n" % time.time())
+ file.close()
+
+python run_buildstats () {
+ import bb.build
+ import bb.event
+ import bb.data
+ import time, subprocess, platform
+
+ if isinstance(e, bb.event.BuildStarted):
+ ########################################################################
+ # on the first pass, create the buildstats hierarchy and then
+ # set the buildname
+ ########################################################################
+ try:
+ bb.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
+ except:
+ pass
+ set_bn(e)
+ bn = get_bn(e)
+ set_device(e)
+ device = get_device(e)
+
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ try:
+ bb.mkdirhier(bsdir)
+ except:
+ pass
+ if device != "NoLogicalDevice":
+ set_diskdata("__diskdata_build", device, e.data)
+ set_timedata("__timedata_build", e.data)
+ build_time = os.path.join(bsdir, "build_stats")
+ # write start of build into build_time
+ file = open(build_time,"a")
+ host_info = platform.uname()
+ file.write("Host Info: ")
+ for x in host_info:
+ if x:
+ file.write(x + " ")
+ file.write("\n")
+ file.write("Build Started: %0.2f \n" % time.time())
+ file.close()
+
+ elif isinstance(e, bb.event.BuildCompleted):
+ bn = get_bn(e)
+ device = get_device(e)
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+ build_time = os.path.join(bsdir, "build_stats")
+ file = open(build_time, "a")
+ ########################################################################
+ # Write build statistics for the build
+ ########################################################################
+ timedata = get_timedata("__timedata_build", e.data)
+ if timedata:
+ time, cpu = timedata
+ # write end of build and cpu used into build_time (reusing the handle opened above)
+ file.write("Elapsed time: %0.2f seconds \n" % (time))
+ if cpu:
+ file.write("CPU usage: %0.1f%% \n" % cpu)
+ if device != "NoLogicalDevice":
+ diskio = get_diskdata("__diskdata_build", device, e.data)
+ if diskio:
+ for key in sorted(diskio.iterkeys()):
+ file.write(key + ": " + diskio[key] + "\n")
+ file.close()
+
+ if isinstance(e, bb.build.TaskStarted):
+ bn = get_bn(e)
+ device = get_device(e)
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+ if device != "NoLogicalDevice":
+ set_diskdata("__diskdata_task", device, e.data)
+ set_timedata("__timedata_task", e.data)
+ try:
+ bb.mkdirhier(taskdir)
+ except:
+ pass
+ # write into the task event file the name and start time
+ file = open(os.path.join(taskdir, e.task), "a")
+ file.write("Event: %s \n" % bb.event.getName(e))
+ file.write("Started: %0.2f \n" % time.time())
+ file.close()
+
+ elif isinstance(e, bb.build.TaskSucceeded):
+ bn = get_bn(e)
+ device = get_device(e)
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+ write_task_data("passed", os.path.join(taskdir, e.task), device, e)
+ if e.task == "do_rootfs":
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ bs=os.path.join(bsdir, "build_stats")
+ file = open(bs,"a")
+ rootfs = e.data.getVar('IMAGE_ROOTFS', True)
+ rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
+ file.write("Uncompressed Rootfs size: %s" % rootfs_size)
+ file.close()
+
+ elif isinstance(e, bb.build.TaskFailed):
+ bn = get_bn(e)
+ device = get_device(e)
+ bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+ taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+ write_task_data("failed", os.path.join(taskdir, e.task), device, e)
+ ########################################################################
+ # Lets make things easier and tell people where the build failed in
+ # build_status. We do this here because BuildCompleted triggers no
+ # matter what the status of the build actually is
+ ########################################################################
+ build_status = os.path.join(bsdir, "build_stats")
+ file = open(build_status,"a")
+ file.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
+ file.close()
+
+}
+
+addhandler run_buildstats
+
diff --git a/meta/classes/ccache.inc b/meta/classes/ccache.inc
deleted file mode 100644
index d830a1b8f..000000000
--- a/meta/classes/ccache.inc
+++ /dev/null
@@ -1,11 +0,0 @@
-# Make ccache use a TMPDIR specific ccache directory if using the crosscompiler,
-# since it isn't likely to be useful with any other toolchain than the one we just
-# built, and would otherwise push more useful things out of the default cache.
-
-CCACHE_DIR_TARGET = "${TMPDIR}/ccache"
-
-python () {
- if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
- bb.data.setVar('CCACHE_DIR', '${CCACHE_DIR_TARGET}', d)
- bb.data.setVarFlag('CCACHE_DIR', 'export', '1', d)
-}
diff --git a/meta/classes/ccdv.bbclass b/meta/classes/ccdv.bbclass
deleted file mode 100644
index 8c8306cbf..000000000
--- a/meta/classes/ccdv.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
-python () {
- if bb.data.getVar('PN', d, 1) in ['ccdv-native']:
- if not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d, 1):
- bb.data.setVar("DEPENDS", '%s %s' % ("ccdv-native", bb.data.getVar("DEPENDS", d, 1) or ""), d)
- bb.data.setVar("CC", '%s %s' % ("ccdv", bb.data.getVar("CC", d, 1) or ""), d)
- bb.data.setVar("BUILD_CC", '%s %s' % ("ccdv", bb.data.getVar("BUILD_CC", d, 1) or ""), d)
- bb.data.setVar("CCLD", '%s %s' % ("ccdv", bb.data.getVar("CCLD", d, 1) or ""), d)
-}
-
-def quiet_libtool(bb,d):
- deps = (bb.data.getVar('DEPENDS', d, 1) or "").split()
- if 'libtool-cross' in deps:
- return "'LIBTOOL=${STAGING_BINDIR_NATIVE}/${HOST_SYS}-libtool --silent'"
- elif 'libtool-native' in deps:
- return "'LIBTOOL=${B}/${HOST_SYS}-libtool --silent'"
- else:
- return ""
-
-CCDV = "ccdv"
-EXTRA_OEMAKE_append = " ${@quiet_libtool(bb,d)}"
-MAKE += "-s"
diff --git a/meta/classes/cmake.bbclass b/meta/classes/cmake.bbclass
index a9130f2a0..dcd974ab2 100644
--- a/meta/classes/cmake.bbclass
+++ b/meta/classes/cmake.bbclass
@@ -1,11 +1,112 @@
DEPENDS += " cmake-native "
+# We need to unset CCACHE, otherwise cmake gets confused
+CCACHE = ""
+
# We want the staging and installing functions from autotools
inherit autotools
+# Use in-tree builds by default but allow this to be changed
+# since some packages do not support them (e.g. llvm 2.5).
+OECMAKE_SOURCEPATH ?= "."
+
+# If setting this, make sure you also set EXTRA_OEMAKE to
+# "-C ${OECMAKE_BUILDPATH}" so that make runs the right makefiles.
+OECMAKE_BUILDPATH ?= ""
+
+# C/C++ Compiler (without cpu arch/tune arguments)
+OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
+OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
+
+# Compiler flags
+OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS}"
+OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
+OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CPPFLAGS} -DNDEBUG"
+OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
+
+OECMAKE_RPATH ?= ""
+OECMAKE_PERLNATIVE_DIR ??= ""
+OECMAKE_EXTRA_ROOT_PATH ?= ""
+
+cmake_do_generate_toolchain_file() {
+ cat > ${WORKDIR}/toolchain.cmake <<EOF
+# CMake system name must be something like "Linux".
+# This is important for cross-compiling.
+set( CMAKE_SYSTEM_NAME `echo ${SDK_OS} | sed 's/^./\u&/'` )
+set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
+set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
+set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
+set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
+set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
+set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
+set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
+
+# only search in the paths provided so cmake doesn't pick
+# up libraries and tools from the native build machine
+set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} ${EXTERNAL_TOOLCHAIN})
+set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
+set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
+
+# Use qt.conf settings
+set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
+
+# We need to set the rpath to the correct directory as cmake does not provide any
+# directory as rpath by default
+set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
+
+# Use native cmake modules
+set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
+
+# add the library paths for a non-/usr/lib libdir, e.g. /usr/lib64
+set( CMAKE_LIBRARY_PATH ${libdir} ${base_libdir})
+
+EOF
+}
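
A note on the CMAKE_SYSTEM_NAME line above: the GNU sed expression 's/^./\u&/' merely uppercases the first character, so an ${SDK_OS} of "linux" becomes the "Linux" that CMake expects. For illustration, the equivalent in Python:

    sdk_os = "linux"
    print(sdk_os[:1].upper() + sdk_os[1:])  # -> Linux
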
+
+addtask generate_toolchain_file after do_patch before do_configure
+
cmake_do_configure() {
- cmake . -DCMAKE_INSTALL_PREFIX:PATH=${prefix} -Wno-dev \
- -DCMAKE_FIND_ROOT_PATH=${STAGING_DIR_HOST}
+ if [ -n "${OECMAKE_BUILDPATH}" ]
+ then
+ mkdir -p ${OECMAKE_BUILDPATH}
+ cd ${OECMAKE_BUILDPATH}
+ fi
+
+ # Just like autotools, cmake can use a site file to cache results that would otherwise need generated binaries to run
+ if [ -e ${WORKDIR}/site-file.cmake ] ; then
+ OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
+ else
+ OECMAKE_SITEFILE=""
+ fi
+
+ cmake \
+ ${OECMAKE_SITEFILE} \
+ ${OECMAKE_SOURCEPATH} \
+ -DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
+ -DCMAKE_INSTALL_SO_NO_EXE=0 \
+ -DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
+ -DCMAKE_VERBOSE_MAKEFILE=1 \
+ ${EXTRA_OECMAKE} \
+ -Wno-dev
+}
+
+cmake_do_compile() {
+ if [ -n "${OECMAKE_BUILDPATH}" ]
+ then
+ cd ${OECMAKE_BUILDPATH}
+ fi
+
+ base_do_compile
+}
+
+cmake_do_install() {
+ if [ -n "${OECMAKE_BUILDPATH}" ]
+ then
+ cd ${OECMAKE_BUILDPATH}
+ fi
+
+ autotools_do_install
}
-EXPORT_FUNCTIONS do_configure
+EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/meta/classes/cml1.bbclass b/meta/classes/cml1.bbclass
index 79218b4a1..d429188c7 100644
--- a/meta/classes/cml1.bbclass
+++ b/meta/classes/cml1.bbclass
@@ -6,3 +6,12 @@ cml1_do_configure() {
EXPORT_FUNCTIONS do_configure
addtask configure after do_unpack do_patch before do_compile
+
+inherit terminal
+
+python do_menuconfig() {
+ oe_terminal("make menuconfig", '${PN} Configuration', d)
+}
+do_menuconfig[nostamp] = "1"
+addtask menuconfig after do_configure
+
diff --git a/meta/classes/copyleft_compliance.bbclass b/meta/classes/copyleft_compliance.bbclass
new file mode 100644
index 000000000..3ca7337b5
--- /dev/null
+++ b/meta/classes/copyleft_compliance.bbclass
@@ -0,0 +1,104 @@
+# Deploy sources for recipes for compliance with copyleft-style licenses
+# Defaults to using symlinks, as it's a quick operation, and one can easily
+# follow the links when making use of the files (e.g. tar with the -h arg).
+#
+# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
+#
+# vi:sts=4:sw=4:et
+
+COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
+
+COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
+COPYLEFT_LICENSE_INCLUDE[type] = 'list'
+COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
+
+COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
+COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
+COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
+
+COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
+COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
+
+COPYLEFT_RECIPE_TYPES ?= 'target'
+COPYLEFT_RECIPE_TYPES[type] = 'list'
+COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
+
+COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
+COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
+
+def copyleft_recipe_type(d):
+ for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
+ if oe.utils.inherits(d, recipe_type):
+ return recipe_type
+ return 'target'
+
+def copyleft_should_include(d):
+ """Determine if this recipe's sources should be deployed for compliance"""
+ import oe.license
+ from fnmatch import fnmatchcase as fnmatch
+
+ recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
+ if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
+ return False, 'recipe type "%s" is excluded' % recipe_type
+
+ include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
+ exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
+
+ try:
+ is_included, reason = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
+ except oe.license.LicenseError as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ else:
+ if is_included:
+ return True, 'recipe has included licenses: %s' % ', '.join(reason)
+ else:
+ return False, 'recipe has excluded licenses: %s' % ', '.join(reason)
+
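
The include/exclude values are fnmatch-style globs; oe.license.is_included() evaluates them against full license expressions such as "GPLv2 & MIT". A simplified standalone sketch for a single plain license string, with exclusion simply winning (function and message strings are illustrative, not the oe.license internals):

    from fnmatch import fnmatchcase

    def should_include(license, include, exclude):
        # Simplified: one plain license string, exclusion takes priority
        if any(fnmatchcase(license, pat) for pat in exclude):
            return False, 'recipe has excluded license: %s' % license
        if any(fnmatchcase(license, pat) for pat in include):
            return True, 'recipe has included license: %s' % license
        return False, 'license %s matched no include glob' % license

    print(should_include('LGPLv2.1', ['GPL*', 'LGPL*'], ['CLOSED', 'Proprietary']))
    # -> (True, 'recipe has included license: LGPLv2.1')
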
+python do_prepare_copyleft_sources () {
+ """Populate a tree of the recipe sources and emit patch series files"""
+ import os.path
+ import shutil
+
+ p = d.getVar('P', True)
+ included, reason = copyleft_should_include(d)
+ if not included:
+ bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
+ return
+ else:
+ bb.debug(1, 'copyleft: %s is included: %s' % (p, reason))
+
+ sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', True)
+ src_uri = d.getVar('SRC_URI', True).split()
+ fetch = bb.fetch2.Fetch(src_uri, d)
+ ud = fetch.ud
+
+ locals = (fetch.localpath(url) for url in fetch.urls)
+ localpaths = [local for local in locals if not local.endswith('.bb')]
+ if not localpaths:
+ return
+
+ pf = d.getVar('PF', True)
+ dest = os.path.join(sources_dir, pf)
+ shutil.rmtree(dest, ignore_errors=True)
+ bb.mkdirhier(dest)
+
+ for path in localpaths:
+ os.symlink(path, os.path.join(dest, os.path.basename(path)))
+
+ patches = src_patches(d)
+ for patch in patches:
+ _, _, local, _, _, parm = bb.decodeurl(patch)
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
+ else:
+ series = os.path.join(dest, 'series')
+
+ with open(series, 'a') as s:
+ s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
+}
+
+addtask prepare_copyleft_sources after do_fetch before do_build
+do_build[recrdeptask] += 'do_prepare_copyleft_sources'
diff --git a/meta/classes/core-image.bbclass b/meta/classes/core-image.bbclass
new file mode 100644
index 000000000..e2ad0fcf3
--- /dev/null
+++ b/meta/classes/core-image.bbclass
@@ -0,0 +1,72 @@
+# Common code for generating core reference images
+#
+# Copyright (C) 2007-2011 Linux Foundation
+
+LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=3f40d7994397109285ec7b81fdeb3b58 \
+ file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
+
+# IMAGE_FEATURES controls the content of the core reference images
+#
+# By default we install the task-core-boot and task-base packages - this gives us
+# a working (console-only) rootfs.
+#
+# Available IMAGE_FEATURES:
+#
+# - apps-console-core
+# - x11-mini - minimal environment for X11 server
+# - x11-base - X11 server + minimal desktop
+# - x11-sato - OpenedHand Sato environment
+# - x11-netbook - Metacity based environment for netbooks
+# - apps-x11-core - X Terminal, file manager, file editor
+# - apps-x11-games
+# - apps-x11-pimlico - OpenedHand Pimlico apps
+# - tools-sdk - SDK
+# - tools-debug - debugging tools
+# - tools-profile - profiling tools
+# - tools-testapps - tools usable to make some device tests
+# - nfs-server - NFS server (exports / over NFS to everybody)
+# - ssh-server-dropbear - SSH server (dropbear)
+# - ssh-server-openssh - SSH server (openssh)
+# - debug-tweaks - makes an image suitable for development
+#
+PACKAGE_GROUP_apps-console-core = "task-core-apps-console"
+PACKAGE_GROUP_x11-mini = "task-core-x11-mini"
+PACKAGE_GROUP_x11-base = "task-core-x11-base"
+PACKAGE_GROUP_x11-sato = "task-core-x11-sato"
+PACKAGE_GROUP_x11-netbook = "task-core-x11-netbook"
+PACKAGE_GROUP_apps-x11-core = "task-core-apps-x11-core"
+PACKAGE_GROUP_apps-x11-games = "task-core-apps-x11-games"
+PACKAGE_GROUP_apps-x11-pimlico = "task-core-apps-x11-pimlico"
+PACKAGE_GROUP_tools-debug = "task-core-tools-debug"
+PACKAGE_GROUP_tools-profile = "task-core-tools-profile"
+PACKAGE_GROUP_tools-testapps = "task-core-tools-testapps"
+PACKAGE_GROUP_tools-sdk = "task-core-sdk task-core-standalone-sdk-target"
+PACKAGE_GROUP_nfs-server = "task-core-nfs-server"
+PACKAGE_GROUP_ssh-server-dropbear = "task-core-ssh-dropbear"
+PACKAGE_GROUP_ssh-server-openssh = "task-core-ssh-openssh"
+PACKAGE_GROUP_package-management = "${ROOTFS_PKGMANAGE}"
+PACKAGE_GROUP_qt4-pkgs = "task-core-qt-demos"
+
+CORE_IMAGE_BASE_INSTALL = '\
+ task-core-boot \
+ task-base-extended \
+ \
+ ${CORE_IMAGE_EXTRA_INSTALL} \
+ '
+
+CORE_IMAGE_EXTRA_INSTALL ?= ""
+
+IMAGE_INSTALL ?= "${CORE_IMAGE_BASE_INSTALL}"
+
+X11_IMAGE_FEATURES = "x11-base apps-x11-core package-management"
+ENHANCED_IMAGE_FEATURES = "${X11_IMAGE_FEATURES} apps-x11-games apps-x11-pimlico package-management"
+SATO_IMAGE_FEATURES = "${ENHANCED_IMAGE_FEATURES} x11-sato ssh-server-dropbear"
+
+inherit image
+
+# Create /etc/timestamp during image construction to give a reasonably sane default time setting
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+
+# Zap the root password if debug-tweaks feature is not enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_root_password ; ",d)}'
+
diff --git a/meta/classes/cpan-base.bbclass b/meta/classes/cpan-base.bbclass
index cc0d11e51..b4b7b81d8 100644
--- a/meta/classes/cpan-base.bbclass
+++ b/meta/classes/cpan-base.bbclass
@@ -2,52 +2,43 @@
# cpan-base provides various Perl-related information needed for building
# CPAN modules
#
-FILES_${PN} += "${libdir}/perl5 ${datadir}/perl5"
+FILES_${PN} += "${libdir}/perl ${datadir}/perl"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
+PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
+
# Determine the staged version of perl from the perl configuration file
def get_perl_version(d):
- import os, bb, re
- cfg = bb.data.expand('${STAGING_DIR}/${HOST_SYS}/perl/config.sh', d)
+ import re
+ cfg = d.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh')
try:
f = open(cfg, 'r')
except IOError:
return None
l = f.readlines();
f.close();
- r = re.compile("version='(\d\.\d\.\d)'")
+ r = re.compile("^version='(\d*\.\d*\.\d*)'")
for s in l:
m = r.match(s)
if m:
return m.group(1)
return None
-# Only 5.8.7 and 5.8.4 existed at the time we moved to the new layout
-def is_new_perl(d):
- ver = get_perl_version(d)
- if ver == "5.8.4" or ver == "5.8.7":
- return "no"
- return "yes"
-
# Determine where the library directories are
def perl_get_libdirs(d):
- import bb
- libdir = bb.data.getVar('libdir', d, 1)
- if is_new_perl(d) == "yes":
- libdirs = libdir + '/perl5'
- else:
- libdirs = libdir + '/*/*/perl5'
- return libdirs
+ libdir = d.getVar('libdir', True)
+ if is_target(d) == "no":
+ libdir += '/perl-native'
+ libdir += '/perl'
+ return libdir
def is_target(d):
- import bb
if not bb.data.inherits_class('native', d):
return "yes"
return "no"
-IS_NEW_PERL = "${@is_new_perl(d)}"
PERLLIBDIRS = "${@perl_get_libdirs(d)}"
FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
diff --git a/meta/classes/cpan.bbclass b/meta/classes/cpan.bbclass
index ca5303d97..587e688bc 100644
--- a/meta/classes/cpan.bbclass
+++ b/meta/classes/cpan.bbclass
@@ -1,65 +1,44 @@
#
# This is for perl modules that use the old Makefile.PL build system
#
-inherit cpan-base
+inherit cpan-base perlnative
EXTRA_CPANFLAGS ?= ""
+EXTRA_PERLFLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
# Env var which tells perl where the perl include files are
-export PERL_INC = "${STAGING_LIBDIR}/perl/${@get_perl_version(d)}/CORE"
+export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
+export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
+export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
cpan_do_configure () {
- yes '' | perl Makefile.PL ${EXTRA_CPANFLAGS}
+ export PERL5LIB="${PERL_ARCHLIB}"
+ yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
- . ${STAGING_DIR}/${TARGET_SYS}/perl/config.sh
- if [ "${IS_NEW_PERL}" = "yes" ]; then
- sed -i -e "s:\(SITELIBEXP = \).*:\1${sitelibexp}:" \
- -e "s:\(SITEARCHEXP = \).*:\1${sitearchexp}:" \
- -e "s:\(INSTALLVENDORLIB = \).*:\1${D}${datadir}/perl5:" \
- -e "s:\(INSTALLVENDORARCH = \).*:\1${D}${libdir}/perl5:" \
- -e "s:\(LDDLFLAGS.*\)${STAGING_LIBDIR_NATIVE}:\1${STAGING_LIBDIR}:" \
- Makefile
- else
- sed -i -e "s:\(SITELIBEXP = \).*:\1${sitelibexp}:" \
- -e "s:\(SITEARCHEXP = \).*:\1${sitearchexp}:" \
- -e "s:\(INSTALLVENDORLIB = \).*:\1${D}${libdir}/perl5/site_perl/${version}:" \
- -e "s:\(INSTALLVENDORARCH = \).*:\1${D}${libdir}/perl5/site_perl/${version}:" \
- -e "s:\(LDDLFLAGS.*\)${STAGING_LIBDIR_NATIVE}:\1${STAGING_LIBDIR}:" \
- Makefile
- fi
+ . ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
+ # Use find since there can be a Makefile generated for each Makefile.PL
+ for f in `find -name Makefile.PL`; do
+ f2=`echo $f | sed -e 's/.PL//'`
+ sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
+ -e 's/perl.real/perl/' \
+ $f2
+ done
fi
}
cpan_do_compile () {
- if [ "${IS_NEW_PERL}" = "yes" ]; then
- oe_runmake PASTHRU_INC="${CFLAGS}" CCFLAGS="${CFLAGS}" LD="${CCLD}"
- else
- # You must use gcc to link on sh
- OPTIONS=""
- if test ${TARGET_ARCH} = "sh3" -o ${TARGET_ARCH} = "sh4"; then
- OPTIONS="LD=${TARGET_ARCH}-${TARGET_OS}-gcc"
- fi
- if test ${TARGET_ARCH} = "powerpc" ; then
- OPTIONS="LD=${TARGET_ARCH}-${TARGET_OS}-gcc"
- fi
- oe_runmake PASTHRU_INC="${CFLAGS}" CCFLAGS="${CFLAGS}" $OPTIONS
- fi
+ oe_runmake PASTHRU_INC="${CFLAGS}" CCFLAGS="${CFLAGS}" LD="${CCLD}"
}
cpan_do_install () {
- if [ ${@is_target(d)} = "yes" ]; then
- oe_runmake install_vendor
- fi
-}
-
-cpan_do_stage () {
- if [ ${@is_target(d)} = "no" ]; then
- oe_runmake install_vendor
- fi
+ oe_runmake DESTDIR="${D}" install_vendor
+ for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
+ sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
+ done
}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install do_stage
+EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cpan_build.bbclass b/meta/classes/cpan_build.bbclass
index 63e716c09..36ffc56b8 100644
--- a/meta/classes/cpan_build.bbclass
+++ b/meta/classes/cpan_build.bbclass
@@ -3,8 +3,6 @@
#
inherit cpan-base
-INHIBIT_NATIVE_STAGE_INSTALL = "1"
-
#
# We also need to have built libmodule-build-perl-native for
# everything except libmodule-build-perl-native itself (which uses
@@ -12,10 +10,9 @@ INHIBIT_NATIVE_STAGE_INSTALL = "1"
# libmodule-build-perl)
#
def cpan_build_dep_prepend(d):
- import bb;
- if bb.data.getVar('CPAN_BUILD_DEPS', d, 1):
+ if d.getVar('CPAN_BUILD_DEPS', True):
return ''
- pn = bb.data.getVar('PN', d, 1)
+ pn = d.getVar('PN', True)
if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']:
return ''
return 'libmodule-build-perl-native '
@@ -25,29 +22,19 @@ DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
cpan_build_do_configure () {
if [ ${@is_target(d)} == "yes" ]; then
# build for target
- . ${STAGING_DIR}/${TARGET_SYS}/perl/config.sh
- if [ "${IS_NEW_PERL}" = "yes" ]; then
- perl Build.PL --installdirs vendor \
- --destdir ${D} \
- --install_path lib="${datadir}/perl5" \
- --install_path arch="${libdir}/perl5" \
- --install_path script=${bindir} \
- --install_path bin=${bindir} \
- --install_path bindoc=${mandir}/man1 \
- --install_path libdoc=${mandir}/man3
- else
+ . ${STAGING_LIBDIR}/perl/config.sh
+
perl Build.PL --installdirs vendor \
--destdir ${D} \
- --install_path lib="${libdir}/perl5/site_perl/${version}" \
- --install_path arch="${libdir}/perl5/site_perl/${version}/${TARGET_SYS}" \
+ --install_path lib="${datadir}/perl" \
+ --install_path arch="${libdir}/perl" \
--install_path script=${bindir} \
--install_path bin=${bindir} \
--install_path bindoc=${mandir}/man1 \
--install_path libdoc=${mandir}/man3
- fi
else
# build for host
- perl Build.PL --installdirs site
+ perl Build.PL --installdirs site --destdir ${D}
fi
}
@@ -56,15 +43,7 @@ cpan_build_do_compile () {
}
cpan_build_do_install () {
- if [ ${@is_target(d)} == "yes" ]; then
- perl Build install
- fi
-}
-
-do_stage_append () {
- if [ ${@is_target(d)} == "no" ]; then
- perl Build install
- fi
+ perl Build install
}
EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/cross-canadian.bbclass b/meta/classes/cross-canadian.bbclass
new file mode 100644
index 000000000..ed53118a2
--- /dev/null
+++ b/meta/classes/cross-canadian.bbclass
@@ -0,0 +1,89 @@
+#
+# NOTE - When using this class the user is responsible for ensuring that
+# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
+# is changed, another nativesdk xxx-canadian-cross can be installed
+#
+
+
+# SDK packages are built either explicitly by the user,
+# or indirectly via dependency. No need to be in 'world'.
+EXCLUDE_FROM_WORLD = "1"
+CLASSOVERRIDE = "class-cross-canadian"
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+#
+# Update PACKAGE_ARCH and PACKAGE_ARCHS
+#
+PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
+python () {
+ archs = d.getVar('PACKAGE_ARCHS', True).split()
+ sdkarchs = []
+ for arch in archs:
+ sdkarchs.append(arch + '-nativesdk')
+ d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
+}
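
A standalone sketch of the rewrite this anonymous python block performs (the arch list here is invented):

    archs = "all any noarch x86_64 i586".split()
    print(" ".join(arch + '-nativesdk' for arch in archs))
    # -> all-nativesdk any-nativesdk noarch-nativesdk x86_64-nativesdk i586-nativesdk
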
+MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
+
+INHIBIT_DEFAULT_DEPS = "1"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
+
+TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
+
+PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
+PKGDATA_DIR = "${TMPDIR}/pkgdata/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
+PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}/"
+
+HOST_ARCH = "${SDK_ARCH}"
+HOST_VENDOR = "${SDK_VENDOR}"
+HOST_OS = "${SDK_OS}"
+HOST_PREFIX = "${SDK_PREFIX}"
+HOST_CC_ARCH = "${SDK_CC_ARCH}"
+HOST_LD_ARCH = "${SDK_LD_ARCH}"
+HOST_AS_ARCH = "${SDK_AS_ARCH}"
+
+#assign DPKG_ARCH
+DPKG_ARCH = "${SDK_ARCH}"
+
+CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+CFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CFLAGS}"
+LDFLAGS = "${BUILDSDK_LDFLAGS} \
+ -Wl,-rpath-link,${STAGING_LIBDIR}/.. \
+ -Wl,-rpath,${libdir}/.. "
+
+DEPENDS_GETTEXT = "gettext-native gettext-nativesdk"
+
+# Path mangling needed by the cross packaging
+# Note that we use := here to ensure that libdir and includedir are
+# target paths.
+target_libdir := "${libdir}"
+target_includedir := "${includedir}"
+target_base_libdir := "${base_libdir}"
+target_prefix := "${prefix}"
+target_exec_prefix := "${exec_prefix}"
+
+# Change to place files in SDKPATH
+base_prefix = "${SDKPATHNATIVE}"
+prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+bindir = "${exec_prefix}/bin/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
+sbindir = "${bindir}"
+base_bindir = "${bindir}"
+base_sbindir = "${bindir}"
+libdir = "${exec_prefix}/lib/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
+libexecdir = "${exec_prefix}/libexec/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+FILES_${PN} = "${prefix}"
+FILES_${PN}-dbg += "${prefix}/.debug \
+ ${prefix}/bin/.debug \
+ "
+
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
+
+# Cross-canadian packages need to pull in nativesdk dynamic libs
+SHLIBSDIR = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${BUILD_OS}/shlibs"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/cross.bbclass b/meta/classes/cross.bbclass
index 9dddca127..e99830723 100644
--- a/meta/classes/cross.bbclass
+++ b/meta/classes/cross.bbclass
@@ -1,21 +1,26 @@
+inherit relocatable
+
# Cross packages are built indirectly via dependency,
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
-# Save PACKAGE_ARCH before changing HOST_ARCH
-OLD_PACKAGE_ARCH := "${PACKAGE_ARCH}"
-PACKAGE_ARCH = "${OLD_PACKAGE_ARCH}"
-# Also save BASE_PACKAGE_ARCH since HOST_ARCH can influence it
-OLD_BASE_PACKAGE_ARCH := "${BASE_PACKAGE_ARCH}"
-BASE_PACKAGE_ARCH = "${OLD_BASE_PACKAGE_ARCH}"
-
+CLASSOVERRIDE = "class-cross"
PACKAGES = ""
+PACKAGES_DYNAMIC = ""
+PACKAGES_DYNAMIC_virtclass-native = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
HOST_OS = "${BUILD_OS}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+HOST_LD_ARCH = "${BUILD_LD_ARCH}"
+HOST_AS_ARCH = "${BUILD_AS_ARCH}"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
+
+export PKG_CONFIG_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
@@ -25,40 +30,49 @@ LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
TOOLCHAIN_OPTIONS = ""
+DEPENDS_GETTEXT = "gettext-native"
+
+# Path mangling needed by the cross packaging
+# Note that we use := here to ensure that libdir and includedir are
+# target paths.
+target_base_prefix := "${base_prefix}"
+target_prefix := "${prefix}"
+target_exec_prefix := "${exec_prefix}"
+target_base_libdir = "${target_base_prefix}/${baselib}"
+target_libdir = "${target_exec_prefix}/${baselib}"
+target_includedir := "${includedir}"
+
# Overrides for paths
+CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
+prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+base_prefix = "${STAGING_DIR_NATIVE}"
+exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
+sbindir = "${bindir}"
+base_bindir = "${bindir}"
+base_sbindir = "${bindir}"
+libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
+libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
-# Path prefixes
-base_prefix = "${exec_prefix}"
-prefix = "${CROSS_DIR}"
-exec_prefix = "${prefix}"
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
-# Base paths
-base_bindir = "${base_prefix}/bin"
-base_sbindir = "${base_prefix}/bin"
-base_libdir = "${base_prefix}/lib"
+python cross_virtclass_handler () {
+ if not isinstance(e, bb.event.RecipePreFinalise):
+ return
-# Architecture independent paths
-datadir = "${prefix}/share"
-sysconfdir = "${prefix}/etc"
-sharedstatedir = "${prefix}/com"
-localstatedir = "${prefix}/var"
-infodir = "${datadir}/info"
-mandir = "${datadir}/man"
-docdir = "${datadir}/doc"
-servicedir = "${prefix}/srv"
+ classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
+ if "cross" not in classextend:
+ return
-# Architecture dependent paths
-bindir = "${exec_prefix}/bin"
-sbindir = "${exec_prefix}/bin"
-libexecdir = "${exec_prefix}/libexec"
-libdir = "${exec_prefix}/lib"
-includedir = "${exec_prefix}/include"
-oldincludedir = "${exec_prefix}/include"
+ pn = e.data.getVar("PN", True)
+ if not pn.endswith("-cross"):
+ return
-do_stage () {
- oe_runmake install
+ bb.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-cross", e.data)
}
+addhandler cross_virtclass_handler
+
do_install () {
- :
+ oe_runmake 'DESTDIR=${D}' install
}
diff --git a/meta/classes/crosssdk.bbclass b/meta/classes/crosssdk.bbclass
new file mode 100644
index 000000000..93aba7022
--- /dev/null
+++ b/meta/classes/crosssdk.bbclass
@@ -0,0 +1,30 @@
+inherit cross
+
+CLASSOVERRIDE = "class-crosssdk"
+PACKAGE_ARCH = "${SDK_ARCH}"
+python () {
+ # set TUNE_PKGARCH to SDK_ARCH
+ d.setVar('TUNE_PKGARCH', d.getVar('SDK_ARCH', True))
+}
+
+STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}"
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
+
+TARGET_ARCH = "${SDK_ARCH}"
+TARGET_VENDOR = "${SDK_VENDOR}"
+TARGET_OS = "${SDK_OS}"
+TARGET_PREFIX = "${SDK_PREFIX}"
+TARGET_CC_ARCH = "${SDK_CC_ARCH}"
+TARGET_LD_ARCH = "${SDK_LD_ARCH}"
+TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_FPU = ""
+
+target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
+target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
+target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
+target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+target_exec_prefix = "${SDKPATHNATIVE}${exec_prefix_nativesdk}"
+baselib = "lib"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
diff --git a/meta/classes/debian.bbclass b/meta/classes/debian.bbclass
index 0afe9fcc3..3637e2ebe 100644
--- a/meta/classes/debian.bbclass
+++ b/meta/classes/debian.bbclass
@@ -8,16 +8,25 @@
#
# Better expressed as: ensure all RDEPENDS are packaged before we package this one
# This means we can't have circular RDEPENDS/RRECOMMENDS
-do_package_write_ipk[rdeptask] = "do_package"
-do_package_write_deb[rdeptask] = "do_package"
-do_package_write_tar[rdeptask] = "do_package"
-do_package_write_rpm[rdeptask] = "do_package"
+DEBIANRDEP = "do_package"
+do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
+do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
+do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
+do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
+
+python () {
+ if not d.getVar("PACKAGES", True):
+ d.setVar("DEBIANRDEP", "")
+}
python debian_package_name_hook () {
import glob, copy, stat, errno, re
- workdir = bb.data.getVar('WORKDIR', d, 1)
- packages = bb.data.getVar('PACKAGES', d, 1)
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
+ bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
+ lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
+ so_re = re.compile("lib.*\.so")
def socrunch(s):
s = s.lower().replace('_', '-')
@@ -39,13 +48,10 @@ python debian_package_name_hook () {
return (s[stat.ST_MODE] & stat.S_IEXEC)
def auto_libname(packages, orig_pkg):
- bin_re = re.compile(".*/s?bin$")
- lib_re = re.compile(".*/lib$")
- so_re = re.compile("lib.*\.so")
sonames = []
has_bins = 0
has_libs = 0
- pkg_dir = os.path.join(workdir, "install", orig_pkg)
+ pkg_dir = os.path.join(pkgdest, orig_pkg)
for root, dirs, files in os.walk(pkg_dir):
if bin_re.match(root) and files:
has_bins = 1
@@ -54,7 +60,7 @@ python debian_package_name_hook () {
for f in files:
if so_re.match(f):
fp = os.path.join(root, f)
- cmd = (bb.data.getVar('BUILD_PREFIX', d, 1) or "") + "objdump -p " + fp + " 2>/dev/null"
+ cmd = (d.getVar('BUILD_PREFIX', True) or "") + "objdump -p " + fp + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
@@ -68,7 +74,7 @@ python debian_package_name_hook () {
if len(sonames) == 1:
soname = sonames[0]
elif len(sonames) > 1:
- lead = bb.data.getVar('LEAD_SONAME', d, 1)
+ lead = d.getVar('LEAD_SONAME', True)
if lead:
r = re.compile(lead)
filtered = []
@@ -89,19 +95,29 @@ python debian_package_name_hook () {
if soname_result:
(pkgname, devname) = soname_result
for pkg in packages.split():
- if (bb.data.getVar('PKG_' + pkg, d) or bb.data.getVar('DEBIAN_NOAUTONAME_' + pkg, d)):
+ if (d.getVar('PKG_' + pkg) or d.getVar('DEBIAN_NOAUTONAME_' + pkg)):
continue
- debian_pn = bb.data.getVar('DEBIANNAME_' + pkg, d)
+ debian_pn = d.getVar('DEBIANNAME_' + pkg)
if debian_pn:
newpkg = debian_pn
elif pkg == orig_pkg:
newpkg = pkgname
else:
newpkg = pkg.replace(orig_pkg, devname, 1)
+ mlpre=d.getVar('MLPREFIX', True)
+ if mlpre:
+ if not newpkg.find(mlpre) == 0:
+ newpkg = mlpre + newpkg
if newpkg != pkg:
- bb.data.setVar('PKG_' + pkg, newpkg, d)
+ d.setVar('PKG_' + pkg, newpkg)
- for pkg in (bb.data.getVar('AUTO_LIBNAME_PKGS', d, 1) or "").split():
+ # A reversed sort is needed when one package name is a substring of another;
+ # e.g. in ncurses, without the reverse sort we get:
+ # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
+ # and later
+ # DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
+ # so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
+ for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', True) or "").split(), reverse=True):
auto_libname(packages, pkg)
}
diff --git a/meta/classes/deploy.bbclass b/meta/classes/deploy.bbclass
new file mode 100644
index 000000000..c3371421d
--- /dev/null
+++ b/meta/classes/deploy.bbclass
@@ -0,0 +1,11 @@
+DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
+SSTATETASKS += "do_deploy"
+do_deploy[sstate-name] = "deploy"
+do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
+do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
+
+python do_deploy_setscene () {
+ sstate_setscene(d)
+}
+addtask do_deploy_setscene
+do_deploy[dirs] = "${DEPLOYDIR} ${B}"
diff --git a/meta/classes/devshell.bbclass b/meta/classes/devshell.bbclass
index 2944dbbfc..ddb6e5530 100644
--- a/meta/classes/devshell.bbclass
+++ b/meta/classes/devshell.bbclass
@@ -1,21 +1,10 @@
-EXTRA_OEMAKE[export] = "1"
+inherit terminal
-do_devshell[dirs] = "${S}"
-do_devshell[nostamp] = "1"
-
-export DISPLAY
-export XAUTHORITY
-
-devshell_do_devshell() {
- export TERMWINDOWTITLE="Bitbake Developer Shell"
- ${TERMCMD}
- if [ $? -ne 0 ]; then
- echo "Fatal: '${TERMCMD}' not found. Check TERMCMD variable."
- exit 1
- fi
+python do_devshell () {
+ oe_terminal(d.getVar('SHELL', True), 'OpenEmbedded Developer Shell', d)
}
-addtask devshell after do_patch
+addtask devshell after do_patch
-EXPORT_FUNCTIONS do_devshell
-
+do_devshell[dirs] = "${S}"
+do_devshell[nostamp] = "1"
diff --git a/meta/classes/distrodata.bbclass b/meta/classes/distrodata.bbclass
new file mode 100644
index 000000000..4b2dee5b1
--- /dev/null
+++ b/meta/classes/distrodata.bbclass
@@ -0,0 +1,763 @@
+require conf/distro/include/distro_tracking_fields.inc
+
+addhandler distro_eventhandler
+python distro_eventhandler() {
+
+ if bb.event.getName(e) == "BuildStarted":
+ import oe.distro_check as dc
+ logfile = dc.create_log_file(e.data, "distrodata.csv")
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("Package,Description,Owner,License,ChkSum,Status,VerMatch,Version,Upsteam,Non-Update,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n")
+ f.close()
+ bb.utils.unlockfile(lf)
+
+ return
+}
+
+addtask distrodata_np
+do_distrodata_np[nostamp] = "1"
+python do_distrodata_np() {
+ localdata = bb.data.createCopy(d)
+ pn = d.getVar("PN", True)
+ bb.note("Package Name: %s" % pn)
+
+ import oe.distro_check as dist_check
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ datetime = localdata.getVar('DATETIME', True)
+ dist_check.update_distro_data(distro_check_dir, datetime)
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-nativesdk") != -1:
+ pnstripped = pn.split("-nativesdk")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-crosssdk") != -1:
+ pnstripped = pn.split("-crosssdk")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ """generate package information from .bb file"""
+ pname = localdata.getVar('PN', True)
+ pcurver = localdata.getVar('PV', True)
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ if pdesc is not None:
+ pdesc = pdesc.replace(',','')
+ pdesc = pdesc.replace('\n','')
+
+ pgrp = localdata.getVar('SECTION', True)
+ plicense = localdata.getVar('LICENSE', True).replace(',','_')
+ if localdata.getVar('LIC_FILES_CHKSUM', True):
+ pchksum="1"
+ else:
+ pchksum="0"
+
+ if localdata.getVar('RECIPE_STATUS', True):
+ hasrstatus="1"
+ else:
+ hasrstatus="0"
+
+ rstatus = localdata.getVar('RECIPE_STATUS', True)
+ if rstatus is not None:
+ rstatus = rstatus.replace(',','')
+
+ pupver = localdata.getVar('RECIPE_LATEST_VERSION', True)
+ if pcurver == pupver:
+ vermatch="1"
+ else:
+ vermatch="0"
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ if noupdate_reason is None:
+ noupdate="0"
+ else:
+ noupdate="1"
+ noupdate_reason = noupdate_reason.replace(',','')
+
+ ris = localdata.getVar('RECIPE_INTEL_SECTION', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ rttr = localdata.getVar('RECIPE_TIME_BETWEEN_LAST_TWO_RELEASES', True)
+ rlrd = localdata.getVar('RECIPE_LATEST_RELEASE_DATE', True)
+ dc = localdata.getVar('DEPENDENCY_CHECK', True)
+ rc = localdata.getVar('RECIPE_COMMENTS', True)
+ result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
+
+ bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s, %s, %s, %s\n" % \
+ (pname, pdesc, maintainer, plicense, pchksum, hasrstatus, vermatch, pcurver, pupver, noupdate, noupdate_reason, rstatus))
+ line = pn
+ for i in result:
+ line = line + "," + i
+ bb.note("%s\n" % line)
+}
+
+addtask distrodata
+do_distrodata[nostamp] = "1"
+python do_distrodata() {
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ logfile = os.path.join(logpath, "distrodata.csv")
+
+ import oe.distro_check as dist_check
+ localdata = bb.data.createCopy(d)
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ datetime = localdata.getVar('DATETIME', True)
+ dist_check.update_distro_data(distro_check_dir, datetime)
+
+ pn = d.getVar("PN", True)
+ bb.note("Package Name: %s" % pn)
+
+ if pn.find("-native") != -1:
+ pnstripped = pn.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-cross") != -1:
+ pnstripped = pn.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pn.find("-initial") != -1:
+ pnstripped = pn.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ """generate package information from .bb file"""
+ pname = localdata.getVar('PN', True)
+ pcurver = localdata.getVar('PV', True)
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ if pdesc is not None:
+ pdesc = pdesc.replace(',','')
+ pdesc = pdesc.replace('\n','')
+
+ pgrp = localdata.getVar('SECTION', True)
+ plicense = localdata.getVar('LICENSE', True).replace(',','_')
+ if localdata.getVar('LIC_FILES_CHKSUM', True):
+ pchksum="1"
+ else:
+ pchksum="0"
+
+ if localdata.getVar('RECIPE_STATUS', True):
+ hasrstatus="1"
+ else:
+ hasrstatus="0"
+
+ rstatus = localdata.getVar('RECIPE_STATUS', True)
+ if rstatus is not None:
+ rstatus = rstatus.replace(',','')
+
+ pupver = localdata.getVar('RECIPE_LATEST_VERSION', True)
+ if pcurver == pupver:
+ vermatch="1"
+ else:
+ vermatch="0"
+
+ noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
+ if noupdate_reason is None:
+ noupdate="0"
+ else:
+ noupdate="1"
+ noupdate_reason = noupdate_reason.replace(',','')
+
+ ris = localdata.getVar('RECIPE_INTEL_SECTION', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+ rttr = localdata.getVar('RECIPE_TIME_BETWEEN_LAST_TWO_RELEASES', True)
+ rlrd = localdata.getVar('RECIPE_LATEST_RELEASE_DATE', True)
+ dc = localdata.getVar('DEPENDENCY_CHECK', True)
+ rc = localdata.getVar('RECIPE_COMMENTS', True)
+ # do the comparison
+ result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
+
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % \
+ (pname, pdesc, maintainer, plicense, pchksum, hasrstatus, vermatch, pcurver, pupver, noupdate, noupdate_reason, rstatus))
+ line = ""
+ for i in result:
+ line = line + "," + i
+ f.write(line + "\n")
+ f.close()
+ bb.utils.unlockfile(lf)
+}
+
+addtask distrodataall after do_distrodata
+do_distrodataall[recrdeptask] = "do_distrodata"
+do_distrodataall[nostamp] = "1"
+do_distrodataall() {
+ :
+}
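+
+# Usage sketch (illustrative): collect tracking data for a target and all of
+# its dependencies, appending the results to ${LOG_DIR}/distrodata.csv:
+#   bitbake -c distrodataall <image-or-recipe>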
+
+addhandler checkpkg_eventhandler
+python checkpkg_eventhandler() {
+ if bb.event.getName(e) == "BuildStarted":
+ import oe.distro_check as dc
+ logfile = dc.create_log_file(e.data, "checkpkg.csv")
+
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tPriority\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\n")
+ f.close()
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checkpkg
+do_checkpkg[nostamp] = "1"
+python do_checkpkg() {
+ localdata = bb.data.createCopy(d)
+ import sys
+ import re
+ import tempfile
+
+ """
+    Sanity check to ensure the same name and type. Match as many patterns as possible,
+ such as:
+ gnome-common-2.20.0.tar.gz (most common format)
+ gtk+-2.90.1.tar.gz
+ xf86-input-synaptics-12.6.9.tar.gz
+ dri2proto-2.3.tar.gz
+ blktool_4.orig.tar.gz
+ libid3tag-0.15.1b.tar.gz
+ unzip552.tar.gz
+ icu4c-3_6-src.tgz
+ genext2fs_1.3.orig.tar.gz
+ gst-fluendo-mp3
+ """
+ prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*[\-_]" # match most patterns which uses "-" as separator to version digits
+ prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
+ prefix3 = "[0-9a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
+ prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
+ suffix = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm)"
+ suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "src.rpm")
+
+ sinterstr = "(?P<name>%s?)(?P<ver>.*)" % prefix
+ sdirstr = "(?P<name>%s)(?P<ver>.*)\.(?P<type>%s$)" % (prefix, suffix)
+
+ def parse_inter(s):
+ m = re.search(sinterstr, s)
+ if not m:
+ return None
+ else:
+ return (m.group('name'), m.group('ver'), "")
+
+ def parse_dir(s):
+ m = re.search(sdirstr, s)
+ if not m:
+ return None
+ else:
+ return (m.group('name'), m.group('ver'), m.group('type'))
+
+ """
+    Check whether 'new' is a newer version than 'old'. We reuse the existing vercmp()
+    for this. PE is cleared in the comparison as it's not relevant to the build, and PR
+    is cleared too since it's difficult to derive from the various upstream formats.
+ """
+ def __vercmp(old, new):
+ (on, ov, ot) = old
+ (en, ev, et) = new
+ if on != en or (et and et not in suffixtuple):
+ return 0
+ ov = re.search("[\d|\.]+[^a-zA-Z]+", ov).group()
+ ev = re.search("[\d|\.]+[^a-zA-Z]+", ev).group()
+ return bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
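+    # e.g. __vercmp(("gnome-common", "2.20.0", "tar.gz"),
+    #              ("gnome-common", "2.20.1", "tar.gz")) returns a negative
+    # value, i.e. the second ("new") tuple is the newer version.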
+
+ """
+    Wrapper for fetching upstream directory info.
+    'url' - upstream link customized by regular expression
+    'd' - database
+    'tmpf' - tmpfile for fetcher output
+    We don't want to fail the whole build because of one recipe's error, so handle
+    all exceptions gracefully without letting them leak outwards.
+ """
+ def internal_fetch_wget(url, d, tmpf):
+ status = "ErrFetchUnknown"
+ """
+        Clear the internal url cache as this is a temporary check. Otherwise bitbake
+        would check the url multiple times when looping through a single url.
+ """
+ fn = d.getVar('FILE', True)
+ bb.fetch2.urldata_cache[fn] = {}
+
+ """
+        To avoid impacting the bitbake build engine, this trick is required for reusing
+        bitbake interfaces. bb.fetch.go() is not applicable as it checks downloaded content
+        in ${DL_DIR}, which we don't want to pollute. So bb.fetch2.checkstatus() is borrowed
+        here: it is designed for checking, but we override its check command for our own
+        purpose.
+ """
+ ld = bb.data.createCopy(d)
+ d.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12\" '${URI}'" \
+ % tmpf.name)
+ bb.data.update_data(ld)
+
+ try:
+ fetcher = bb.fetch2.Fetch([url], ld)
+ fetcher.checkstatus()
+ status = "SUCC"
+ except bb.fetch2.BBFetchException, e:
+ status = "ErrFetch"
+
+ return status
+
+ """
+ Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
+ 'url' - upstream link customized by regular expression
+ 'd' - database
+ 'curver' - current version
+ Return new version if success, or else error in "Errxxxx" style
+ """
+ def check_new_dir(url, curver, d):
+ pn = d.getVar('PN', True)
+ f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
+ status = internal_fetch_wget(url, d, f)
+ fhtml = f.read()
+ if status == "SUCC" and len(fhtml):
+ newver = parse_inter(curver)
+
+ """
+ match "*4.1/">*4.1/ where '*' matches chars
+ N.B. add package name, only match for digits
+ """
+ m = re.search("^%s" % prefix, curver)
+ if m:
+ s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
+ else:
+ s = "(\d+[\.\-_])+\d+/?"
+
+ searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
+ reg = re.compile(searchstr)
+
+ valid = 0
+ for line in fhtml.split("\n"):
+ if line.find(curver) >= 0:
+ valid = 1
+ m = reg.search(line)
+ if m:
+ ver = m.group().split("\"")[1]
+ ver = ver.strip("/")
+ ver = parse_inter(ver)
+ if ver and __vercmp(newver, ver) < 0:
+ newver = ver
+
+ """Expect a match for curver in directory list, or else it indicates unknown format"""
+ if not valid:
+ status = "ErrParseInterDir"
+ else:
+ """rejoin the path name"""
+ status = newver[0] + newver[1]
+ elif not len(fhtml):
+ status = "ErrHostNoDir"
+
+ f.close()
+ if status != "ErrHostNoDir" and re.match("Err", status):
+ logpath = d.getVar('LOG_DIR', True)
+ os.system("cp %s %s/" % (f.name, logpath))
+ os.unlink(f.name)
+ return status
+
+ """
+ Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
+ 'url' - upstream link customized by regular expression
+ 'd' - database
+ 'curname' - current package name
+ Return new version if success, or else error in "Errxxxx" style
+ """
+ def check_new_version(url, curname, d):
+ """possible to have no version in pkg name, such as spectrum-fw"""
+ if not re.search("\d+", curname):
+ return pcurver
+ pn = d.getVar('PN', True)
+ f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
+ status = internal_fetch_wget(url, d, f)
+ fhtml = f.read()
+
+ if status == "SUCC" and len(fhtml):
+ newver = parse_dir(curname)
+
+ """match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
+ pn1 = re.search("^%s" % prefix, curname).group()
+
+ s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
+ searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
+ reg = re.compile(searchstr)
+
+ valid = 0
+ for line in fhtml.split("\n"):
+ m = reg.search(line)
+ if m:
+ valid = 1
+ ver = m.group().split("\"")[1].split("/")[-1]
+ if ver == "download":
+ ver = m.group().split("\"")[1].split("/")[-2]
+ ver = parse_dir(ver)
+ if ver and __vercmp(newver, ver) < 0:
+ newver = ver
+
+ """Expect a match for curver in directory list, or else it indicates unknown format"""
+ if not valid:
+ status = "ErrParseDir"
+ else:
+ """newver still contains a full package name string"""
+ status = re.search("(\d+[\.\-_])*(\d+[0-9a-zA-Z]*)", newver[1]).group()
+ if "_" in status:
+ status = re.sub("_",".",status)
+ elif "-" in status:
+ status = re.sub("-",".",status)
+ elif not len(fhtml):
+ status = "ErrHostNoDir"
+
+ f.close()
+ """if host hasn't directory information, no need to save tmp file"""
+ if status != "ErrHostNoDir" and re.match("Err", status):
+ logpath = d.getVar('LOG_DIR', True)
+ os.system("cp %s %s/" % (f.name, logpath))
+ os.unlink(f.name)
+ return status
+
+ """first check whether a uri is provided"""
+ src_uri = d.getVar('SRC_URI', True)
+ if not src_uri:
+ return
+
+ """initialize log files."""
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ logfile = os.path.join(logpath, "checkpkg.csv")
+
+ """generate package information from .bb file"""
+ pname = d.getVar('PN', True)
+
+ if pname.find("-native") != -1:
+ pnstripped = pname.split("-native")
+ bb.note("Native Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pname.find("-cross") != -1:
+ pnstripped = pname.split("-cross")
+ bb.note("cross Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ if pname.find("-initial") != -1:
+ pnstripped = pname.split("-initial")
+ bb.note("initial Split: %s" % pnstripped)
+ localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+ bb.data.update_data(localdata)
+
+ pdesc = localdata.getVar('DESCRIPTION', True)
+ pgrp = localdata.getVar('SECTION', True)
+ pversion = localdata.getVar('PV', True)
+ plicense = localdata.getVar('LICENSE', True)
+ psection = localdata.getVar('SECTION', True)
+ phome = localdata.getVar('HOMEPAGE', True)
+ prelease = localdata.getVar('PR', True)
+ ppriority = localdata.getVar('PRIORITY', True)
+ pdepends = localdata.getVar('DEPENDS', True)
+ pbugtracker = localdata.getVar('BUGTRACKER', True)
+ ppe = localdata.getVar('PE', True)
+ psrcuri = localdata.getVar('SRC_URI', True)
+ maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
+
+ found = 0
+ for uri in src_uri.split():
+ m = re.compile('(?P<type>[^:]*)').match(uri)
+ if not m:
+ raise MalformedUrl(uri)
+ elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
+ found = 1
+ pproto = m.group('type')
+ break
+ if not found:
+ pproto = "file"
+ pupver = "N/A"
+ pstatus = "ErrUnknown"
+
+ (type, host, path, user, pswd, parm) = bb.decodeurl(uri)
+ if type in ['http', 'https', 'ftp']:
+ pcurver = d.getVar('PV', True)
+ else:
+ pcurver = d.getVar("SRCREV", True)
+
+ if type in ['http', 'https', 'ftp']:
+ newver = pcurver
+ altpath = path
+ dirver = "-"
+ curname = "-"
+
+ """
+            match a version number amid the path, such as "5.7" in:
+            http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
+            N.B. what about something like "../5.7/5.8/..."? No such example found so far.
+ """
+ m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
+ if m:
+ altpath = path.split(m.group())[0]
+ dirver = m.group().strip("/")
+
+ """use new path and remove param. for wget only param is md5sum"""
+ alturi = bb.encodeurl([type, host, altpath, user, pswd, {}])
+
+ newver = check_new_dir(alturi, dirver, d)
+ altpath = path
+ if not re.match("Err", newver) and dirver != newver:
+ altpath = altpath.replace(dirver, newver, True)
+
+ """Now try to acquire all remote files in current directory"""
+ if not re.match("Err", newver):
+ curname = altpath.split("/")[-1]
+
+ """get remote name by skipping pacakge name"""
+ m = re.search(r"/.*/", altpath)
+ if not m:
+ altpath = "/"
+ else:
+ altpath = m.group()
+
+ alturi = bb.encodeurl([type, host, altpath, user, pswd, {}])
+ newver = check_new_version(alturi, curname, d)
+ while(newver == "ErrHostNoDir"):
+ if alturi == "/download":
+ break
+ else:
+ alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
+ newver = check_new_version(alturi, curname, d)
+ if not re.match("Err", newver):
+ pupver = newver
+ if pupver != pcurver:
+ pstatus = "UPDATE"
+ else:
+ pstatus = "MATCH"
+
+ if re.match("Err", newver):
+ pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
+ elif type == 'git':
+ if user:
+ gituser = user + '@'
+ else:
+ gituser = ""
+
+ if 'protocol' in parm:
+ gitproto = parm['protocol']
+ else:
+ gitproto = "git"
+ gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
+ gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
+ tmp = os.popen(gitcmd).read()
+ tmp2 = os.popen(gitcmd2).read()
+            # This handles repos that have tags like refs/tags/1.2.2
+ if tmp:
+ tmpline = tmp.split("\n")
+ verflag = 0
+ for line in tmpline:
+ if len(line)==0:
+ break;
+ puptag = line.split("/")[-1]
+ puptag = re.search("[0-9][0-9|\.|_]+[0-9]", puptag)
+ if puptag == None:
+ continue;
+ puptag = puptag.group()
+ puptag = re.sub("_",".",puptag)
+ plocaltag = pversion.split("+")[0]
+ if "git" in plocaltag:
+ plocaltag = plocaltag.split("-")[0]
+ result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
+ if result > 0:
+ verflag = 1
+ pstatus = "UPDATE"
+ pupver = puptag
+ elif verflag == 0 :
+ pupver = plocaltag
+ pstatus = "MATCH"
+            # This handles repos without tags
+ elif tmp2:
+ pupver = tmp2.split("\t")[0]
+ if pupver in pversion:
+ pstatus = "MATCH"
+ else:
+ pstatus = "UPDATE"
+ else:
+ pstatus = "ErrGitAccess"
+ elif type == 'svn':
+ options = []
+ if user:
+ options.append("--username %s" % user)
+ if pswd:
+ options.append("--password %s" % pswd)
+ svnproto = 'svn'
+ if 'proto' in parm:
+ svnproto = parm['proto']
+ if 'rev' in parm:
+ pcurver = parm['rev']
+
+ svncmd = "svn info %s %s://%s%s/%s/ 2>&1" % (" ".join(options), svnproto, host, path, parm["module"])
+ print svncmd
+ svninfo = os.popen(svncmd).read()
+ for line in svninfo.split("\n"):
+ if re.search("^Last Changed Rev:", line):
+ pupver = line.split(" ")[-1]
+ if pupver in pversion:
+ pstatus = "MATCH"
+ else:
+ pstatus = "UPDATE"
+
+ if re.match("Err", pstatus):
+ pstatus = "ErrSvnAccess"
+ elif type == 'cvs':
+ pupver = "HEAD"
+ pstatus = "UPDATE"
+ elif type == 'file':
+ """local file is always up-to-date"""
+ pupver = pcurver
+ pstatus = "MATCH"
+ else:
+ pstatus = "ErrUnsupportedProto"
+
+ if re.match("Err", pstatus):
+ pstatus += ":%s%s" % (host, path)
+
+ """Read from manual distro tracking fields as alternative"""
+ pmver = d.getVar("RECIPE_LATEST_VERSION", True)
+ if not pmver:
+ pmver = "N/A"
+ pmstatus = "ErrNoRecipeData"
+ else:
+ if pmver == pcurver:
+ pmstatus = "MATCH"
+ else:
+ pmstatus = "UPDATE"
+
+ psrcuri = psrcuri.split()[0]
+ pdepends = "".join(pdepends.split("\t"))
+ pdesc = "".join(pdesc.split("\t"))
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
+ (pname,pversion,pupver,plicense,psection, phome,prelease, ppriority,pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri,maintainer))
+ f.close()
+ bb.utils.unlockfile(lf)
+}
+
+addtask checkpkgall after do_checkpkg
+do_checkpkgall[recrdeptask] = "do_checkpkg"
+do_checkpkgall[nostamp] = "1"
+do_checkpkgall() {
+ :
+}
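+
+# Usage sketch (illustrative): check a target and all of its dependencies for
+# upstream updates, appending the results to ${LOG_DIR}/checkpkg.csv:
+#   bitbake -c checkpkgall <image-or-recipe>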
+
+addhandler distro_check_eventhandler
+python distro_check_eventhandler() {
+ if bb.event.getName(e) == "BuildStarted":
+ """initialize log files."""
+ import oe.distro_check as dc
+ result_file = dc.create_log_file(e.data, "distrocheck.csv")
+ return
+}
+
+addtask distro_check
+do_distro_check[nostamp] = "1"
+python do_distro_check() {
+ """checks if the package is present in other public Linux distros"""
+ import oe.distro_check as dc
+ import bb
+ import shutil
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
+ return
+
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+ tmpdir = d.getVar('TMPDIR', True)
+ distro_check_dir = os.path.join(tmpdir, "distro_check")
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ result_file = os.path.join(logpath, "distrocheck.csv")
+ datetime = localdata.getVar('DATETIME', True)
+ dc.update_distro_data(distro_check_dir, datetime)
+
+ # do the comparison
+ result = dc.compare_in_distro_packages_list(distro_check_dir, d)
+
+ # save the results
+ dc.save_distro_check_result(result, datetime, result_file, d)
+}
+
+addtask distro_checkall after do_distro_check
+do_distro_checkall[recrdeptask] = "do_distro_check"
+do_distro_checkall[nostamp] = "1"
+do_distro_checkall() {
+ :
+}
+#
+# Check Missing License Text.
+# Use this task to generate the missing license text data for the pkg-report system,
+# so we can find recipes whose license text does not exist in the common-licenses directory.
+#
+addhandler checklicense_eventhandler
+python checklicense_eventhandler() {
+ if bb.event.getName(e) == "BuildStarted":
+ """initialize log files."""
+ import oe.distro_check as dc
+ logfile = dc.create_log_file(e.data, "missinglicense.csv")
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("Package\tLicense\tMissingLicense\n")
+ f.close()
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checklicense
+do_checklicense[nostamp] = "1"
+python do_checklicense() {
+ import os
+ import bb
+ import shutil
+ logpath = d.getVar('LOG_DIR', True)
+ bb.utils.mkdirhier(logpath)
+ pn = d.getVar('PN', True)
+ logfile = os.path.join(logpath, "missinglicense.csv")
+ generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ license_types = d.getVar('LICENSE', True)
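+    # The loop below flattens compound license expressions, e.g.
+    # LICENSE = "GPLv2+ & (LGPLv2.1 | MPL-1.1)" is reduced to the entries
+    # GPLv2, LGPLv2.1 and MPL-1.1 before checking common-licenses.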
+ for license_type in ((license_types.replace('+', '').replace('|', '&')
+ .replace('(', '').replace(')', '').replace(';', '')
+ .replace(',', '').replace(" ", "").split("&"))):
+ if not os.path.isfile(os.path.join(generic_directory, license_type)):
+ lf = bb.utils.lockfile("%s.lock" % logfile)
+ f = open(logfile, "a")
+ f.write("%s\t%s\t%s\n" % \
+ (pn,license_types,license_type))
+ f.close()
+ bb.utils.unlockfile(lf)
+ return
+}
+
+addtask checklicenseall after do_checklicense
+do_checklicenseall[recrdeptask] = "do_checklicense"
+do_checklicenseall[nostamp] = "1"
+do_checklicenseall() {
+ :
+}
+
+
diff --git a/meta/classes/distutils-base.bbclass b/meta/classes/distutils-base.bbclass
index 5150be76b..6d18e08f1 100644
--- a/meta/classes/distutils-base.bbclass
+++ b/meta/classes/distutils-base.bbclass
@@ -1,18 +1,5 @@
-EXTRA_OEMAKE = ""
-DEPENDS += "${@["python-native python", ""][(bb.data.getVar('PACKAGES', d, 1) == '')]}"
-RDEPENDS += "python-core"
+DEPENDS += "${@["python-native python", ""][(d.getVar('PACKAGES', True) == '')]}"
+RDEPENDS_${PN} += "${@['', 'python-core']['${PN}' == '${BPN}']}"
-def python_dir(d):
- import os, bb
- staging_incdir = bb.data.getVar( "STAGING_INCDIR", d, 1 )
- if os.path.exists( "%s/python2.5" % staging_incdir ): return "python2.5"
- if os.path.exists( "%s/python2.4" % staging_incdir ): return "python2.4"
- if os.path.exists( "%s/python2.3" % staging_incdir ): return "python2.3"
- raise "No Python in STAGING_INCDIR. Forgot to build python-native ?"
-
-PYTHON_DIR = "${@python_dir(d)}"
-FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
-FILES_${PN}-dbg = "${libdir}/${PYTHON_DIR}/site-packages/.debug \
- ${libdir}/${PYTHON_DIR}/site-packages/*/.debug \
- ${libdir}/${PYTHON_DIR}/site-packages/*/*/.debug"
+inherit distutils-common-base
diff --git a/meta/classes/distutils-common-base.bbclass b/meta/classes/distutils-common-base.bbclass
new file mode 100644
index 000000000..f66a5cd57
--- /dev/null
+++ b/meta/classes/distutils-common-base.bbclass
@@ -0,0 +1,21 @@
+inherit python-dir
+
+EXTRA_OEMAKE = ""
+
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+PACKAGES = "${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
+
+FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+
+FILES_${PN}-dev += "\
+ ${datadir}/pkgconfig \
+ ${libdir}/pkgconfig \
+ ${PYTHON_SITEPACKAGES_DIR}/*.la \
+"
+FILES_${PN}-dbg += "\
+ ${PYTHON_SITEPACKAGES_DIR}/.debug \
+ ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
+ ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
+"
diff --git a/meta/classes/distutils-native-base.bbclass b/meta/classes/distutils-native-base.bbclass
new file mode 100644
index 000000000..ceda512e3
--- /dev/null
+++ b/meta/classes/distutils-native-base.bbclass
@@ -0,0 +1,3 @@
+DEPENDS += "${@["python-native", ""][(d.getVar('PACKAGES', True) == '')]}"
+
+inherit distutils-common-base
diff --git a/meta/classes/distutils.bbclass b/meta/classes/distutils.bbclass
index 27351d9c7..18ae805f7 100644
--- a/meta/classes/distutils.bbclass
+++ b/meta/classes/distutils.bbclass
@@ -1,39 +1,45 @@
inherit distutils-base
+DISTUTILS_BUILD_ARGS ?= ""
+DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
+DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
+ --install-data=${STAGING_DATADIR}"
+DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
+ --install-data=${D}/${datadir}"
+
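+# Recipes can pass extra arguments to setup.py via these, e.g. (illustrative,
+# --build-base is a standard distutils build option):
+#   DISTUTILS_BUILD_ARGS += "--build-base=${B}/build"
+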
distutils_do_compile() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
- ${STAGING_BINDIR_NATIVE}/python setup.py build || \
- oefatal "python setup.py build_ext execution failed."
+ ${STAGING_BINDIR_NATIVE}/python setup.py build ${DISTUTILS_BUILD_ARGS} || \
+ bbfatal "python setup.py build_ext execution failed."
}
distutils_stage_headers() {
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
- ${STAGING_BINDIR_NATIVE}/python setup.py install_headers --install-dir=${STAGING_INCDIR}/${PYTHON_DIR} || \
- oefatal "python setup.py install_headers execution failed."
+ ${STAGING_BINDIR_NATIVE}/python setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
+ bbfatal "python setup.py install_headers execution failed."
}
distutils_stage_all() {
- install -d ${STAGING_DIR_HOST}${layout_prefix}/${PYTHON_DIR}/site-packages
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${STAGING_DIR_HOST}${layout_prefix}/${PYTHON_DIR}/site-packages \
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
+ PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
- ${STAGING_BINDIR_NATIVE}/python setup.py install --prefix=${STAGING_DIR_HOST}${layout_prefix} --install-data=${STAGING_DATADIR} || \
- oefatal "python setup.py install (stage) execution failed."
+ ${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
+ bbfatal "python setup.py install (stage) execution failed."
}
distutils_do_install() {
- install -d ${D}${libdir}/${PYTHON_DIR}/site-packages
- STAGING_INCDIR=${STAGING_INCDIR} \
- STAGING_LIBDIR=${STAGING_LIBDIR} \
- PYTHONPATH=${D}/${libdir}/${PYTHON_DIR}/site-packages \
+ install -d ${D}${PYTHON_SITEPACKAGES_DIR}
+ STAGING_INCDIR=${STAGING_INCDIR} \
+ STAGING_LIBDIR=${STAGING_LIBDIR} \
+ PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
- ${STAGING_BINDIR_NATIVE}/python setup.py install --prefix=${D}/${prefix} --install-data=${D}/${datadir} || \
- oefatal "python setup.py install execution failed."
+ ${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+ bbfatal "python setup.py install execution failed."
for i in `find ${D} -name "*.py"` ; do \
sed -i -e s:${D}::g $i
@@ -45,15 +51,26 @@ distutils_do_install() {
done
fi
- if test -e ${D}${sbindir} ; then
+ if test -e ${D}${sbindir}; then
for i in ${D}${sbindir}/* ; do \
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
- rm -f ${D}${libdir}/${PYTHON_DIR}/site-packages/easy-install.pth
+ rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
+ #
+ # FIXME: Bandaid against wrong datadir computation
+ #
+ if test -e ${D}${datadir}/share; then
+ mv -f ${D}${datadir}/share/* ${D}${datadir}/
+ fi
+
+    # These are generated files; on really slow systems the storage/speed trade-off
+    # of keeping them might be worth it, but in general it isn't
find ${D}${libdir}/${PYTHON_DIR}/site-packages -iname '*.pyo' -exec rm {} \;
}
EXPORT_FUNCTIONS do_compile do_install
+
+export LDSHARED="${CCLD} -shared"
diff --git a/meta/classes/dummy.bbclass b/meta/classes/dummy.bbclass
new file mode 100644
index 000000000..8c300717d
--- /dev/null
+++ b/meta/classes/dummy.bbclass
@@ -0,0 +1,2 @@
+# An empty bbclass to facilitate dynamic inherit, include,
+# and require statements.
diff --git a/meta/classes/externalsrc.bbclass b/meta/classes/externalsrc.bbclass
new file mode 100644
index 000000000..7e00ef8d1
--- /dev/null
+++ b/meta/classes/externalsrc.bbclass
@@ -0,0 +1,53 @@
+# Copyright (C) 2012 Linux Foundation
+# Author: Richard Purdie
+# Some code and influence taken from srctree.bbclass:
+# Copyright (C) 2009 Chris Larson <clarson@kergoth.com>
+# Released under the MIT license (see COPYING.MIT for the terms)
+#
+# externalsrc.bbclass enables the use of an existing source tree, usually external
+# to the build system, to build a piece of software rather than the usual
+# fetch/unpack/patch process.
+#
+# To use, set S to point at the directory you want to use containing the sources
+# e.g. S = "/path/to/my/source/tree"
+#
+# If the class is to work for both target and native versions (or with multilibs/
+# cross or other BBCLASSEXTEND variants), it's expected that setting B to point to
+# where the compiled binaries should be placed will work (split source and build
+# directories). This is the default, but B can be set to S if circumstances dictate.
+#
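+# A minimal usage sketch (illustrative, the path is hypothetical):
+#   inherit externalsrc
+#   S = "/path/to/my/source/tree"
+# and, if a split build directory is not wanted:
+#   B = "${S}"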
+
+SRC_URI = ""
+SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
+B = "${WORKDIR}/${BPN}-${PV}/"
+
+def remove_tasks(tasks, deltasks, d):
+ for task in tasks:
+ deps = d.getVarFlag(task, "deps")
+ for preptask in deltasks:
+ if preptask in deps:
+ deps.remove(preptask)
+ d.setVarFlag(task, "deps", deps)
+ # Poking around bitbake internal variables is evil but there appears to be no better way :(
+ tasklist = d.getVar('__BBTASKS') or []
+ for task in deltasks:
+ d.delVarFlag(task, "task")
+ if task in tasklist:
+ tasklist.remove(task)
+ d.setVar('__BBTASKS', tasklist)
+
+python () {
+ tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
+ covered = d.getVar("SRCTREECOVEREDTASKS", True).split()
+
+ for task in tasks:
+ if task.endswith("_setscene"):
+ # sstate is never going to work for external source trees, disable it
+ covered.append(task)
+ else:
+            # Since configure will likely touch ${S}, lock it so that only one task has access at a time
+ d.appendVarFlag(task, "lockfiles", "${S}/singletask.lock")
+
+ remove_tasks(tasks, covered, d)
+}
+
diff --git a/meta/classes/flow-lossage.bbclass b/meta/classes/flow-lossage.bbclass
deleted file mode 100644
index 00e6bf025..000000000
--- a/meta/classes/flow-lossage.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-# gcc-3.4 blows up in gtktext with -frename-registers on arm-linux
-python () {
- cflags = (bb.data.getVar('CFLAGS', d, 1) or '').replace('-frename-registers', '')
- bb.data.setVar('CFLAGS', cflags, d)
-}
diff --git a/meta/classes/gconf.bbclass b/meta/classes/gconf.bbclass
index e147ecf68..a966c268b 100644
--- a/meta/classes/gconf.bbclass
+++ b/meta/classes/gconf.bbclass
@@ -1,5 +1,10 @@
DEPENDS += "gconf gconf-native"
+# This is referenced by the gconf m4 macros and would default to the value hardcoded
+# into gconf at compile time otherwise
+export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
+export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
+
gconf_postinst() {
if [ "x$D" != "x" ]; then
exit 1
@@ -26,12 +31,12 @@ done
}
python populate_packages_append () {
- import os.path, re
- packages = bb.data.getVar('PACKAGES', d, 1).split()
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ import re
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
for pkg in packages:
- schema_dir = '%s/install/%s/etc/gconf/schemas' % (workdir, pkg)
+ schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
schemas = []
schema_re = re.compile(".*\.schemas$")
if os.path.exists(schema_dir):
@@ -40,16 +45,19 @@ python populate_packages_append () {
schemas.append(f)
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
- bb.data.setVar('SCHEMA_FILES', " ".join(schemas), d)
- postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ d.setVar('SCHEMA_FILES', " ".join(schemas))
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += bb.data.getVar('gconf_postinst', d, 1)
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
- prerm = bb.data.getVar('pkg_prerm_%s' % pkg, d, 1) or bb.data.getVar('pkg_prerm', d, 1)
+ postinst += d.getVar('gconf_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ prerm = d.getVar('pkg_prerm_%s' % pkg, True) or d.getVar('pkg_prerm', True)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += bb.data.getVar('gconf_prerm', d, 1)
- bb.data.setVar('pkg_prerm_%s' % pkg, prerm, d)
+ prerm += d.getVar('gconf_prerm', True)
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+ rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
+ rdepends += ' ' + d.getVar('MLPREFIX') + 'gconf'
+ d.setVar("RDEPENDS_%s" % pkg, rdepends)
}
diff --git a/meta/classes/gettext.bbclass b/meta/classes/gettext.bbclass
index a1e00e72c..95818c677 100644
--- a/meta/classes/gettext.bbclass
+++ b/meta/classes/gettext.bbclass
@@ -1,16 +1,21 @@
-def gettext_after_parse(d):
- import bb
+def gettext_dependencies(d):
+ if d.getVar('USE_NLS', True) == 'no' and not oe.utils.inherits(d, 'native', 'nativesdk', 'cross'):
+ return ""
+ if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
+ return ""
+ if oe.utils.inherits(d, 'native', 'cross'):
+ return "gettext-minimal-native"
+ return d.getVar('DEPENDS_GETTEXT', False)
+
+def gettext_oeconf(d):
+ if oe.utils.inherits(d, 'native', 'cross'):
+ return '--disable-nls'
# Remove the NLS bits if USE_NLS is no.
- if bb.data.getVar('USE_NLS', d, 1) == 'no':
- cfg = oe_filter_out('^--(dis|en)able-nls$', bb.data.getVar('EXTRA_OECONF', d, 1) or "", d)
- cfg += " --disable-nls"
- depends = bb.data.getVar('DEPENDS', d, 1) or ""
- bb.data.setVar('DEPENDS', oe_filter_out('^(virtual/libiconv|virtual/libintl)$', depends, d), d)
- bb.data.setVar('EXTRA_OECONF', cfg, d)
+ if d.getVar('USE_NLS', True) == 'no' and not oe.utils.inherits(d, 'nativesdk', 'cross-canadian'):
+ return '--disable-nls'
+ return "--enable-nls"
-python () {
- gettext_after_parse(d)
-}
+DEPENDS_GETTEXT ??= "virtual/gettext gettext-native"
-DEPENDS =+ "gettext-native"
-EXTRA_OECONF += "--enable-nls"
+BASEDEPENDS =+ "${@gettext_dependencies(d)}"
+EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"
diff --git a/meta/classes/gnome.bbclass b/meta/classes/gnome.bbclass
index 25d177885..a19dd1703 100644
--- a/meta/classes/gnome.bbclass
+++ b/meta/classes/gnome.bbclass
@@ -1,20 +1,3 @@
-def gnome_verdir(v):
- import re
- m = re.match("^([0-9]+)\.([0-9]+)", v)
- return "%s.%s" % (m.group(1), m.group(2))
+inherit gnomebase gtk-icon-cache gconf mime
-SECTION ?= "x11/gnome"
-SRC_URI = "${GNOME_MIRROR}/${PN}/${@gnome_verdir("${PV}")}/${PN}-${PV}.tar.bz2"
-
-DEPENDS += "gnome-common"
-
-FILES_${PN} += "${datadir}/application-registry ${datadir}/mime-info \
- ${datadir}/gnome-2.0"
-
-inherit autotools pkgconfig gconf
-
-EXTRA_OEMAKE += "GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL=1"
-
-gnome_stage_includes() {
- autotools_stage_includes
-}
+EXTRA_OECONF += "--enable-introspection=no"
diff --git a/meta/classes/gnomebase.bbclass b/meta/classes/gnomebase.bbclass
new file mode 100644
index 000000000..a4209a028
--- /dev/null
+++ b/meta/classes/gnomebase.bbclass
@@ -0,0 +1,30 @@
+def gnome_verdir(v):
+ import re
+ m = re.match("^([0-9]+)\.([0-9]+)", v)
+ return "%s.%s" % (m.group(1), m.group(2))
+
+SECTION ?= "x11/gnome"
+SRC_URI = "${GNOME_MIRROR}/${BPN}/${@gnome_verdir("${PV}")}/${BPN}-${PV}.tar.bz2;name=archive"
+
+DEPENDS += "gnome-common"
+
+FILES_${PN} += "${datadir}/application-registry \
+ ${datadir}/mime-info \
+ ${datadir}/mime/packages \
+ ${datadir}/mime/application \
+ ${datadir}/gnome-2.0 \
+ ${datadir}/polkit* \
+ ${datadir}/GConf \
+ ${datadir}/glib-2.0/schemas \
+"
+
+FILES_${PN}-doc += "${datadir}/devhelp"
+
+inherit autotools pkgconfig
+
+do_install_append() {
+ rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
+ rm -rf ${D}${localstatedir}/scrollkeeper/*
+ rm -f ${D}${datadir}/applications/*.cache
+}
+
diff --git a/meta/classes/grub-efi.bbclass b/meta/classes/grub-efi.bbclass
new file mode 100644
index 000000000..1efb43b80
--- /dev/null
+++ b/meta/classes/grub-efi.bbclass
@@ -0,0 +1,116 @@
+# grub-efi.bbclass
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# Released under the MIT license (see packages/COPYING)
+
+# Provide grub-efi specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
+# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
+
+do_bootimg[depends] += "grub-efi-${TARGET_ARCH}-native:do_deploy"
+
+GRUBCFG = "${S}/grub.cfg"
+GRUB_TIMEOUT ?= "10"
+#FIXME: build this from the machine config
+GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
+
+EFIDIR = "/EFI/BOOT"
+
+grubefi_populate() {
+ # DEST must be the root of the image so that EFIDIR is not
+ # nested under a top level directory.
+ DEST=$1
+
+ install -d ${DEST}${EFIDIR}
+
+ GRUB_IMAGE="bootia32.efi"
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ GRUB_IMAGE="bootx64.efi"
+ fi
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
+
+ install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR}
+}
+
+grubefi_iso_populate() {
+ grubefi_populate ${ISODIR}
+}
+
+grubefi_hddimg_populate() {
+ grubefi_populate ${HDDDIR}
+}
+
+python build_grub_cfg() {
+ import sys
+
+ workdir = d.getVar('WORKDIR', True)
+ if not workdir:
+ bb.error("WORKDIR not defined, unable to package")
+ return
+
+ labels = d.getVar('LABELS', True)
+ if not labels:
+ bb.debug(1, "LABELS not defined, nothing to do")
+ return
+
+ if labels == []:
+ bb.debug(1, "No labels, nothing to do")
+ return
+
+ cfile = d.getVar('GRUBCFG', True)
+ if not cfile:
+ raise bb.build.FuncFailed('Unable to read GRUBCFG')
+
+ try:
+ cfgfile = file(cfile, 'w')
+    except (IOError, OSError):
+        raise bb.build.FuncFailed('Unable to open %s' % (cfile))
+
+ cfgfile.write('# Automatically created by OE\n')
+
+ opts = d.getVar('GRUB_OPTS', True)
+ if opts:
+ for opt in opts.split(';'):
+ cfgfile.write('%s\n' % opt)
+
+ cfgfile.write('default=%s\n' % (labels.split()[0]))
+
+ timeout = d.getVar('GRUB_TIMEOUT', True)
+ if timeout:
+ cfgfile.write('timeout=%s\n' % timeout)
+ else:
+ cfgfile.write('timeout=50\n')
+
+ for label in labels.split():
+ localdata = d.createCopy()
+
+ overrides = localdata.getVar('OVERRIDES', True)
+ if not overrides:
+ raise bb.build.FuncFailed('OVERRIDES not defined')
+
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
+ bb.data.update_data(localdata)
+
+ cfgfile.write('\nmenuentry \'%s\'{\n' % (label))
+ cfgfile.write('linux /vmlinuz LABEL=%s' % (label))
+
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
+
+ if append:
+ cfgfile.write('%s' % (append))
+ cfgfile.write('\n')
+
+ if initrd:
+ cfgfile.write('initrd /initrd')
+ cfgfile.write('\n}\n')
+
+ cfgfile.close()
+}
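+
+# For illustration, with LABELS = "boot" and the defaults above, the generated
+# grub.cfg looks roughly like:
+#   serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+#   default=boot
+#   timeout=10
+#   menuentry 'boot'{
+#   linux /vmlinuz LABEL=boot <contents of APPEND>
+#   }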
diff --git a/meta/classes/gtk-icon-cache.bbclass b/meta/classes/gtk-icon-cache.bbclass
index b25636517..60e3401f4 100644
--- a/meta/classes/gtk-icon-cache.bbclass
+++ b/meta/classes/gtk-icon-cache.bbclass
@@ -1,5 +1,6 @@
FILES_${PN} += "${datadir}/icons/hicolor"
-RDEPENDS += "hicolor-icon-theme"
+
+DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']}"
# This could run on the host as icon cache files are architecture independent,
# but there is no gtk-update-icon-cache built natively.
@@ -9,37 +10,49 @@ if [ "x$D" != "x" ]; then
fi
# Update the pixbuf loaders in case they haven't been registered yet
-gdk-pixbuf-query-loaders > /etc/gtk-2.0/gdk-pixbuf.loaders
+GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
-gtk-update-icon-cache -q /usr/share/icons/hicolor
+for icondir in /usr/share/icons/* ; do
+ if [ -d $icondir ] ; then
+ gtk-update-icon-cache -fqt $icondir
+ fi
+done
}
gtk_icon_cache_postrm() {
-gtk-update-icon-cache -q /usr/share/icons/hicolor
+for icondir in /usr/share/icons/* ; do
+ if [ -d $icondir ] ; then
+ gtk-update-icon-cache -qt $icondir
+ fi
+done
}
python populate_packages_append () {
- import os.path
- packages = bb.data.getVar('PACKAGES', d, 1).split()
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
for pkg in packages:
- icon_dir = '%s/install/%s/%s/icons/hicolor' % (workdir, pkg, bb.data.getVar('datadir', d, 1))
+ icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', True))
if not os.path.exists(icon_dir):
continue
-
+
+ bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
+ rdepends = d.getVar('RDEPENDS_%s' % pkg, True)
+ rdepends = rdepends + ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme"
+ d.setVar('RDEPENDS_%s' % pkg, rdepends)
+
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
- postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += bb.data.getVar('gtk_icon_cache_postinst', d, 1)
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+ postinst += d.getVar('gtk_icon_cache_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
- postrm = bb.data.getVar('pkg_postrm_%s' % pkg, d, 1) or bb.data.getVar('pkg_postrm', d, 1)
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += bb.data.getVar('gtk_icon_cache_postrm', d, 1)
- bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+ postrm += d.getVar('gtk_icon_cache_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/meta/classes/gzipnative.bbclass b/meta/classes/gzipnative.bbclass
new file mode 100644
index 000000000..4a411434a
--- /dev/null
+++ b/meta/classes/gzipnative.bbclass
@@ -0,0 +1,3 @@
+PATH_prepend = "${STAGING_BINDIR_NATIVE}/pigz-native:${STAGING_BINDIR_NATIVE}/gzip-native:"
+DEPENDS += "gzip-native"
+
diff --git a/meta/classes/icecc.bbclass b/meta/classes/icecc.bbclass
index 56cbd6444..ae74050f6 100644
--- a/meta/classes/icecc.bbclass
+++ b/meta/classes/icecc.bbclass
@@ -9,7 +9,7 @@
# ICECC_VERSION accordingly.
#
#The class now handles all 3 different compile 'stages' (i.e native ,cross-kernel and target) creating the
-#necessary enviroment tar.gz file to be used by the remote machines.
+#necessary environment tar.gz file to be used by the remote machines.
#It also supports meta-toolchain generation
#
#If ICECC_PATH is not set in local.conf then the class will try to locate it using 'which'
@@ -26,313 +26,195 @@
#Error checking is kept to minimum so double check any parameters you pass to the class
###########################################################################################
+ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
-def icc_determine_gcc_version(gcc):
- """
- Hack to determine the version of GCC
-
- 'i686-apple-darwin8-gcc-4.0.1 (GCC) 4.0.1 (Apple Computer, Inc. build 5363)'
- """
- import os
- return os.popen("%s --version" % gcc ).readline().split()[2]
-
-def create_cross_env(bb,d):
- """
- Create a tar.bz2 of the current toolchain
- """
-
- # Constin native-native compilation no environment needed if
- # host prefix is empty (let us duplicate the query for ease)
- prefix = bb.data.expand('${HOST_PREFIX}', d)
- if len(prefix) == 0:
- return ""
-
- import tarfile, socket, time, os
- ice_dir = bb.data.expand('${CROSS_DIR}', d)
- prefix = bb.data.expand('${HOST_PREFIX}' , d)
- distro = bb.data.expand('${DISTRO}', d)
- target_sys = bb.data.expand('${TARGET_SYS}', d)
- target_prefix = bb.data.expand('${TARGET_PREFIX}', d)
- float = bb.data.getVar('TARGET_FPU', d) or "hard"
- name = socket.gethostname()
-
-
- # Stupid check to determine if we have built a libc and a cross
- # compiler.
- try:
- os.stat(os.path.join(ice_dir, target_sys, 'lib', 'libc.so'))
- os.stat(os.path.join(ice_dir, target_sys, 'bin', 'g++'))
- except: # no cross compiler built yet
- return ""
-
- VERSION = icc_determine_gcc_version( os.path.join(ice_dir,target_sys,"bin","g++") )
- cross_name = prefix + distro + "-" + target_sys + "-" + float + "-" + VERSION + "-" + name
- tar_file = os.path.join(ice_dir, 'ice', cross_name + '.tar.gz')
-
- try:
- os.stat(tar_file)
- # tar file already exists
- return tar_file
- except:
- try:
- os.makedirs(os.path.join(ice_dir,'ice'))
- except:
- # directory already exists, continue
- pass
-
-
- #check if user has specified a specific icecc-create-env script
- #if not use the OE provided one
- cr_env_script = bb.data.getVar('ICECC_ENV_EXEC', d) or bb.data.expand('${STAGING_DIR}', d)+"/ice/icecc-create-env"
- #call the modified create-env script
- result=os.popen("%s %s %s %s %s %s" %(cr_env_script,
- "--silent",
- os.path.join(ice_dir,target_sys,'bin','gcc'),
- os.path.join(ice_dir,target_sys,'bin','g++'),
- os.path.join(ice_dir,target_sys,'bin','as'),
- os.path.join(ice_dir,"ice",cross_name) ) )
- return tar_file
-
-
-def create_native_env(bb,d):
-
- import tarfile, socket, time, os
- ice_dir = bb.data.expand('${CROSS_DIR}', d)
- prefix = bb.data.expand('${HOST_PREFIX}' , d)
- distro = bb.data.expand('${DISTRO}', d)
- target_sys = bb.data.expand('${TARGET_SYS}', d)
- target_prefix = bb.data.expand('${TARGET_PREFIX}', d)
- float = bb.data.getVar('TARGET_FPU', d) or "hard"
- name = socket.gethostname()
-
-
- archive_name = "local-host-env" + "-" + name
- tar_file = os.path.join(ice_dir, 'ice', archive_name + '.tar.gz')
-
- try:
- os.stat(tar_file)
- # tar file already exists
- return tar_file
- except:
- try:
- #os.makedirs(os.path.join(ice_dir))
- os.makedirs(os.path.join(ice_dir,'ice'))
- except:
- # directory already exists, continue
- pass
-
-
- #check if user has specified a specific icecc-create-env script
- #if not use the OE provided one
- cr_env_script = bb.data.getVar('ICECC_ENV_EXEC', d) or bb.data.expand('${STAGING_DIR}', d)+"/ice/icecc-create-env"
- result=os.popen("%s %s %s %s %s %s" %(cr_env_script,
- "--silent",
- os.popen("%s gcc" % "which").read()[:-1],
- os.popen("%s g++" % "which").read()[:-1],
- os.popen("%s as" % "which").read()[:-1],
- os.path.join(ice_dir,"ice",archive_name) ) )
- return tar_file
-
-
-
-def create_cross_kernel_env(bb,d):
-
- import tarfile, socket, time, os
- ice_dir = bb.data.expand('${CROSS_DIR}', d)
- prefix = bb.data.expand('${HOST_PREFIX}' , d)
- distro = bb.data.expand('${DISTRO}', d)
- target_sys = bb.data.expand('${TARGET_SYS}', d)
- target_prefix = bb.data.expand('${TARGET_PREFIX}', d)
- float = bb.data.getVar('TARGET_FPU', d) or "hard"
- name = socket.gethostname()
- kernel_cc = bb.data.expand('${KERNEL_CC}', d)
- kernel_cc = kernel_cc[:-1]
-
-
- # Stupid check to determine if we have built a libc and a cross
- # compiler.
- try:
- os.stat(os.path.join(ice_dir, 'bin', kernel_cc))
- except: # no cross compiler built yet
- return ""
+def icecc_dep_prepend(d):
+ # INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
+ # we need that built is the responsibility of the patch function / class, not
+ # the application.
+ if not d.getVar('INHIBIT_DEFAULT_DEPS'):
+ return "icecc-create-env-native"
+ return ""
- VERSION = icc_determine_gcc_version( os.path.join(ice_dir,"bin",kernel_cc) )
- cross_name = prefix + distro + "-" + target_sys + "-" + float + "-" + VERSION + "-" + name
- tar_file = os.path.join(ice_dir, 'ice', cross_name + '.tar.gz')
+DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
- try:
- os.stat(tar_file)
- # tar file already exists
- return tar_file
- except:
- try:
- os.makedirs(os.path.join(ice_dir,'ice'))
- except:
- # directory already exists, continue
- pass
-
-
- #check if user has specified a specific icecc-create-env script
- #if not use the OE provided one
- cr_env_script = bb.data.getVar('ICECC_ENV_EXEC', d) or bb.data.expand('${STAGING_DIR}', d)+"/ice/icecc-create-env"
- result=os.popen("%s %s %s %s %s %s" %(cr_env_script,
- "--silent",
- os.path.join(ice_dir,'bin',kernel_cc),
- os.path.join(ice_dir,target_sys,'bin','g++'),
- os.path.join(ice_dir,target_sys,'bin','as'),
- os.path.join(ice_dir,"ice",cross_name) ) )
- return tar_file
-
-
-def create_env(bb,d):
-
- #return create_cross_kernel_env(bb,d)
+def get_cross_kernel_cc(bb,d):
+ kernel_cc = d.expand('${KERNEL_CC}')
+ kernel_cc = kernel_cc.replace('ccache', '').strip()
+ kernel_cc = kernel_cc.split(' ')[0]
+ kernel_cc = kernel_cc.strip()
+ return kernel_cc
- if bb.data.inherits_class("native", d):
- return create_native_env(bb,d)
- elif bb.data.inherits_class("kernel", d):
- return create_cross_kernel_env(bb,d)
- elif bb.data.inherits_class("cross", d):
- return create_native_env(bb,d)
- elif bb.data.inherits_class("sdk", d):
- return create_native_env(bb,d)
- else:
- return create_cross_env(bb,d)
-
-
-def create_path(compilers, type, bb, d):
+def create_path(compilers, bb, d):
"""
Create Symlinks for the icecc in the staging directory
"""
- import os
-
- staging = os.path.join(bb.data.expand('${STAGING_DIR}', d), "ice", type)
+ staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
+ if icc_is_kernel(bb, d):
+ staging += "-kernel"
#check if the icecc path is set by the user
- icecc = bb.data.getVar('ICECC_PATH', d) or os.popen("%s icecc" % "which").read()[:-1]
+ icecc = d.getVar('ICECC_PATH') or os.popen("which icecc").read()[:-1]
-
# Create the dir if necessary
try:
os.stat(staging)
except:
- os.makedirs(staging)
+ try:
+ os.makedirs(staging)
+ except:
+ pass
for compiler in compilers:
gcc_path = os.path.join(staging, compiler)
try:
os.stat(gcc_path)
except:
- os.symlink(icecc, gcc_path)
-
- return staging + ":"
-
-
-
-
-
-def use_icc_version(bb,d):
+ try:
+ os.symlink(icecc, gcc_path)
+ except:
+ pass
- icecc_ver = "yes"
- system_class_blacklist = [ "none" ]
-
- for black in system_class_blacklist:
- if bb.data.inherits_class(black, d):
- icecc_ver = "no"
+ return staging
+def use_icc(bb,d):
+ package_tmp = d.expand('${PN}')
- user_class_blacklist = bb.data.getVar('ICECC_USER_CLASS_BL', d) or "none"
- user_class_blacklist = user_class_blacklist.split()
-
- for black in user_class_blacklist:
- if bb.data.inherits_class(black, d):
- icecc_ver = "no"
-
- return icecc_ver
+ system_class_blacklist = [ "none" ]
+ user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
+ package_class_blacklist = system_class_blacklist + user_class_blacklist
-
-
-def icc_path(bb,d,compile):
- package_tmp = bb.data.expand('${PN}', d)
+ for black in package_class_blacklist:
+ if bb.data.inherits_class(black, d):
+ #bb.note(package_tmp, ' class ', black, ' found in blacklist, disable icecc')
+ return "no"
#"system" package blacklist contains a list of packages that can not distribute compile tasks
#for one reason or the other
- system_package_blacklist = [ "uclibc", "glibc-intermediate", "gcc", "qemu", "bind", "u-boot", "dhcp-forwarder", "enchant" ]
+ system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
+ user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
+ package_blacklist = system_package_blacklist + user_package_blacklist
- for black in system_package_blacklist:
- if black in package_tmp:
- bb.data.setVar("PARALLEL_MAKE" , "", d)
- return ""
+ for black in package_blacklist:
+ if black in package_tmp:
+ #bb.note(package_tmp, ' found in blacklist, disable icecc')
+ return "no"
- #user defined exclusion list
- user_package_blacklist = bb.data.getVar('ICECC_USER_PACKAGE_BL', d) or "none"
- user_package_blacklist = user_package_blacklist.split()
+ if d.getVar('PARALLEL_MAKE') == "":
+ bb.note(package_tmp, " ", d.expand('${PV}'), " has empty PARALLEL_MAKE, disable icecc")
+ return "no"
- for black in user_package_blacklist:
- if black in package_tmp:
- bb.data.setVar("PARALLEL_MAKE" , "", d)
- return ""
+ return "yes"
+def icc_is_kernel(bb, d):
+ return \
+ bb.data.inherits_class("kernel", d);
- prefix = bb.data.expand('${HOST_PREFIX}', d)
+def icc_is_native(bb, d):
+ return \
+ bb.data.inherits_class("cross", d) or \
+ bb.data.inherits_class("native", d);
-
- if compile and bb.data.inherits_class("cross", d):
- return create_path( ["gcc", "g++"], "native", bb, d)
+def icc_version(bb, d):
+ if use_icc(bb, d) == "no":
+ return ""
- elif compile and bb.data.inherits_class("native", d):
- return create_path( ["gcc", "g++"], "native", bb, d)
+ parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
+ d.setVar("PARALLEL_MAKE", parallel)
- elif compile and bb.data.inherits_class("kernel", d):
- return create_path( [get_cross_kernel_ver(bb,d), "foo"], "cross-kernel", bb, d)
+ if icc_is_native(bb, d):
+ archive_name = "local-host-env"
+ elif d.expand('${HOST_PREFIX}') == "":
+ bb.fatal(d.expand("${PN}"), " NULL prefix")
+ else:
+ prefix = d.expand('${HOST_PREFIX}' )
+ distro = d.expand('${DISTRO}')
+ target_sys = d.expand('${TARGET_SYS}')
+ float = d.getVar('TARGET_FPU') or "hard"
+ archive_name = prefix + distro + "-" + target_sys + "-" + float
+ if icc_is_kernel(bb, d):
+ archive_name += "-kernel"
- elif not compile or len(prefix) == 0:
- return create_path( ["gcc", "g++"], "native", bb, d)
+ import socket
+ ice_dir = d.expand('${STAGING_DIR_NATIVE}${prefix_native}')
+ tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
- else:
- return create_path( [prefix+"gcc", prefix+"g++"], "cross", bb, d)
+ return tar_file
+def icc_path(bb,d):
+ if icc_is_kernel(bb, d):
+ return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
+ else:
+ prefix = d.expand('${HOST_PREFIX}')
+ return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
+def icc_get_tool(bb, d, tool):
+ if icc_is_native(bb, d):
+ return os.popen("which %s" % tool).read()[:-1]
+ elif icc_is_kernel(bb, d):
+ return os.popen("which %s" % get_cross_kernel_cc(bb, d)).read()[:-1]
+ else:
+ ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
+ target_sys = d.expand('${TARGET_SYS}')
+ return os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
-def icc_version(bb,d):
- return create_env(bb,d)
+set_icecc_env() {
+ ICECC_VERSION="${@icc_version(bb, d)}"
+ if [ "x${ICECC_VERSION}" = "x" ]
+ then
+ return
+ fi
-def check_for_kernel(bb,d):
- if bb.data.inherits_class("kernel", d):
- return "yes"
+ ICE_PATH="${@icc_path(bb, d)}"
+ if [ "x${ICE_PATH}" = "x" ]
+ then
+ return
+ fi
- return "no"
+ ICECC_CC="${@icc_get_tool(bb,d, "gcc")}"
+ ICECC_CXX="${@icc_get_tool(bb,d, "g++")}"
+ if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
+ then
+ return
+ fi
+ ICE_VERSION=`$ICECC_CC -dumpversion`
+ ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
+ if [ ! -x "${ICECC_ENV_EXEC}" ]
+ then
+ return
+ fi
-def get_cross_kernel_ver(bb,d):
+ ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
+ if [ "`dirname "${ICECC_AS}"`" = "." ]
+ then
+ ICECC_AS="`which as`"
+ fi
- return bb.data.expand('${KERNEL_CC}', d).strip() or "gcc"
+ if [ ! -r "${ICECC_VERSION}" ]
+ then
+ mkdir -p "`dirname "${ICECC_VERSION}"`"
+ ${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
+ fi
+
+ export ICECC_VERSION ICECC_CC ICECC_CXX
+ export PATH="$ICE_PATH:$PATH"
+ export CCACHE_PATH="$PATH"
+}
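+
+# Typical usage sketch (illustrative): enable distributed compilation from
+# local.conf with e.g.
+#   INHERIT += "icecc"
+#   ICECC_PARALLEL_MAKE = "-j 16"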
-# set the icecream environment variables
do_configure_prepend() {
- export PATH=${@icc_path(bb,d,False)}$PATH
- export ICECC_CC="gcc"
- export ICECC_CXX="g++"
+ set_icecc_env
}
do_compile_prepend() {
+ set_icecc_env
+}
- export PATH=${@icc_path(bb,d,True)}$PATH
-
- #check if we are building a kernel and select gcc-cross-kernel
- if [ "${@check_for_kernel(bb,d)}" = "yes" ]; then
- export ICECC_CC="${@get_cross_kernel_ver(bb,d)}"
- export ICECC_CXX="${HOST_PREFIX}g++"
- else
- export ICECC_CC="${HOST_PREFIX}gcc"
- export ICECC_CXX="${HOST_PREFIX}g++"
- fi
-
- if [ "${@use_icc_version(bb,d)}" = "yes" ]; then
- export ICECC_VERSION="${@icc_version(bb,d)}"
- else
- export ICECC_VERSION="NONE"
- fi
+do_compile_kernelmodules_prepend() {
+ set_icecc_env
}
+#do_install_prepend() {
+# set_icecc_env
+#}
diff --git a/meta/classes/image-empty.bbclass b/meta/classes/image-empty.bbclass
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/meta/classes/image-empty.bbclass
diff --git a/meta/classes/image-live.bbclass b/meta/classes/image-live.bbclass
new file mode 100644
index 000000000..2f3261ec1
--- /dev/null
+++ b/meta/classes/image-live.bbclass
@@ -0,0 +1,15 @@
+
+AUTO_SYSLINUXCFG = "1"
+INITRD_IMAGE ?= "core-image-minimal-initramfs"
+INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
+SYSLINUX_ROOT = "root=/dev/ram0 "
+SYSLINUX_TIMEOUT ?= "10"
+SYSLINUX_LABELS ?= "boot install"
+LABELS_append = " ${SYSLINUX_LABELS} "
+
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
+
+do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
+do_bootimg[depends] += "${IMAGE_BASENAME}:do_rootfs"
+
+inherit bootimg
diff --git a/meta/classes/image-mklibs.bbclass b/meta/classes/image-mklibs.bbclass
new file mode 100644
index 000000000..762381520
--- /dev/null
+++ b/meta/classes/image-mklibs.bbclass
@@ -0,0 +1,72 @@
+do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
+
+IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
+
+mklibs_optimize_image_doit() {
+ rm -rf ${WORKDIR}/mklibs
+ mkdir -p ${WORKDIR}/mklibs/dest
+ cd ${IMAGE_ROOTFS}
+ du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
+ for i in `find .`; do file $i; done \
+ | grep ELF \
+ | grep "LSB executable" \
+ | grep "dynamically linked" \
+ | sed "s/:.*//" \
+ | sed "s+^\./++" \
+ > ${WORKDIR}/mklibs/executables.list
+
+ case ${TARGET_ARCH} in
+ powerpc | mips | microblaze )
+ dynamic_loader="${base_libdir}/ld.so.1"
+ ;;
+ powerpc64)
+ dynamic_loader="${base_libdir}/ld64.so.1"
+ ;;
+ x86_64)
+ dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
+ ;;
+ i586 )
+ dynamic_loader="${base_libdir}/ld-linux.so.2"
+ ;;
+ arm )
+ dynamic_loader="${base_libdir}/ld-linux.so.3"
+ ;;
+ * )
+ dynamic_loader="/unknown_dynamic_linker"
+ ;;
+ esac
+
+ mklibs -v \
+ --ldlib ${dynamic_loader} \
+ --sysroot ${PKG_CONFIG_SYSROOT_DIR} \
+ --root ${IMAGE_ROOTFS} \
+ --target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
+ -d ${WORKDIR}/mklibs/dest \
+ `cat ${WORKDIR}/mklibs/executables.list`
+
+ cd ${WORKDIR}/mklibs/dest
+ for i in *
+ do
+ cp $i `find ${IMAGE_ROOTFS} -name $i`
+ done
+
+ cd ${IMAGE_ROOTFS}
+ du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
+
+ echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
+ echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
+}
+
+mklibs_optimize_image() {
+ for img in ${MKLIBS_OPTIMIZED_IMAGES}
+ do
+ if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
+ then
+ mklibs_optimize_image_doit
+ break
+ fi
+ done
+}
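+
+# Example (hypothetical local.conf values): optimize one image by name, or
+# every image built from this metadata:
+# MKLIBS_OPTIMIZED_IMAGES = "core-image-minimal"
+# MKLIBS_OPTIMIZED_IMAGES = "all"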
+
+
+EXPORT_FUNCTIONS mklibs_optimize_image
diff --git a/meta/classes/image-prelink.bbclass b/meta/classes/image-prelink.bbclass
new file mode 100644
index 000000000..53ef47e4d
--- /dev/null
+++ b/meta/classes/image-prelink.bbclass
@@ -0,0 +1,35 @@
+do_rootfs[depends] += "prelink-native:do_populate_sysroot"
+
+IMAGE_PREPROCESS_COMMAND += "prelink_image; "
+
+prelink_image () {
+# export PSEUDO_DEBUG=4
+# /bin/env | /bin/grep PSEUDO
+# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
+# echo "LD_PRELOAD=$LD_PRELOAD"
+
+ pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
+ echo "Size before prelinking $pre_prelink_size."
+
+ # We need a prelink conf on the filesystem, add one if it's missing
+ if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
+ cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
+ ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
+ dummy_prelink_conf=true;
+ else
+ dummy_prelink_conf=false;
+ fi
+
+ # prelink!
+ ${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
+
+ # Remove the prelink.conf if we had to add it.
+ if [ "$dummy_prelink_conf" = "true" ]; then
+ rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
+ fi
+
+	post_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
+	echo "Size after prelinking $post_prelink_size."
+}
+
+EXPORT_FUNCTIONS prelink_image
diff --git a/meta/classes/image-swab.bbclass b/meta/classes/image-swab.bbclass
new file mode 100644
index 000000000..0414653f7
--- /dev/null
+++ b/meta/classes/image-swab.bbclass
@@ -0,0 +1,98 @@
+HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
+SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
+SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
+TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
+TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
+
+SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
+BB_DEFAULT_TASK = "generate_swabber_report"
+
+# Several recipes don't build with parallel make when run under strace
+# Ideally these should be fixed but as a temporary measure disable parallel
+# builds for troublesome recipes
+PARALLEL_MAKE_pn-openssl = ""
+PARALLEL_MAKE_pn-eglibc = ""
+PARALLEL_MAKE_pn-glib-2.0 = ""
+PARALLEL_MAKE_pn-libxml2 = ""
+PARALLEL_MAKE_pn-readline = ""
+PARALLEL_MAKE_pn-util-linux = ""
+PARALLEL_MAKE_pn-binutils = ""
+PARALLEL_MAKE_pn-bison = ""
+PARALLEL_MAKE_pn-cmake = ""
+PARALLEL_MAKE_pn-elfutils = ""
+PARALLEL_MAKE_pn-gcc = ""
+PARALLEL_MAKE_pn-gcc-runtime = ""
+PARALLEL_MAKE_pn-m4 = ""
+PARALLEL_MAKE_pn-opkg = ""
+PARALLEL_MAKE_pn-pkgconfig = ""
+PARALLEL_MAKE_pn-prelink = ""
+PARALLEL_MAKE_pn-qemugl = ""
+PARALLEL_MAKE_pn-rpm = ""
+PARALLEL_MAKE_pn-tcl = ""
+PARALLEL_MAKE_pn-beecrypt = ""
+PARALLEL_MAKE_pn-curl = ""
+PARALLEL_MAKE_pn-gmp = ""
+PARALLEL_MAKE_pn-libmpc = ""
+PARALLEL_MAKE_pn-libxslt = ""
+PARALLEL_MAKE_pn-lzo = ""
+PARALLEL_MAKE_pn-popt = ""
+PARALLEL_MAKE_pn-linux-wrs = ""
+PARALLEL_MAKE_pn-libgcrypt = ""
+PARALLEL_MAKE_pn-gpgme = ""
+PARALLEL_MAKE_pn-udev = ""
+PARALLEL_MAKE_pn-gnutls = ""
+PARALLEL_MAKE_pn-sat-solver = ""
+PARALLEL_MAKE_pn-libzypp = ""
+PARALLEL_MAKE_pn-zypper = ""
+
+python() {
+    # NOTE: It might be useful to detect host contamination in native and
+    # cross packages too, but since that turns out to be pretty hard to do
+    # for all native and cross packages which aren't swabber-native or one
+    # of its dependencies, they are ignored for now...
+ if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
+ deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
+ deps.append('strace-native:do_populate_sysroot')
+ d.setVarFlag('do_setscene', 'depends', " ".join(deps))
+ logdir = d.expand("${TRACE_LOGDIR}")
+ bb.utils.mkdirhier(logdir)
+ else:
+ d.setVar('STRACEFUNC', '')
+}
+
+STRACEPID = "${@os.getpid()}"
+STRACEFUNC = "imageswab_attachstrace"
+
+do_configure[prefuncs] += "${STRACEFUNC}"
+do_compile[prefuncs] += "${STRACEFUNC}"
+
+imageswab_attachstrace () {
+ STRACE=`which strace`
+
+ if [ -x "$STRACE" ]; then
+ swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
+ fi
+}
+
+do_generate_swabber_report () {
+
+ update_distro ${HOST_DATA}
+
+ # Swabber can't create the directory for us
+ mkdir -p ${SWABBER_REPORT}
+
+ REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
+
+ if [ `which ccache` ] ; then
+ CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
+ fi
+
+ if [ "$(ls -A ${HOST_DATA})" ]; then
+ echo "Generating swabber report"
+ swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
+ else
+ echo "No host data, cannot generate swabber report."
+ fi
+}
+addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
+do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"
diff --git a/meta/classes/image-vmdk.bbclass b/meta/classes/image-vmdk.bbclass
new file mode 100644
index 000000000..736d1d79b
--- /dev/null
+++ b/meta/classes/image-vmdk.bbclass
@@ -0,0 +1,34 @@
+
+NOISO = "1"
+
+SYSLINUX_ROOT = "root=/dev/hda2 "
+SYSLINUX_PROMPT = "0"
+SYSLINUX_TIMEOUT = "1"
+SYSLINUX_LABELS = "boot"
+LABELS_append = " ${SYSLINUX_LABELS} "
+
+# need to define the dependency and the ROOTFS for directdisk
+do_bootdirectdisk[depends] += "${IMAGE_BASENAME}:do_rootfs"
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
+
+# creating VMDK relies on having a live hddimg so ensure we
+# inherit it here.
+#inherit image-live
+inherit boot-directdisk
+
+create_vmdk_image () {
+ qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
+ ln -s ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk
+
+}
+
+python do_vmdkimg() {
+ bb.build.exec_func('create_vmdk_image', d)
+}
+
+#addtask vmdkimg after do_bootimg before do_build
+addtask vmdkimg after do_bootdirectdisk before do_build
+do_vmdkimg[nostamp] = "1"
+
+do_vmdkimg[depends] += "qemu-native:do_populate_sysroot"
+
diff --git a/meta/classes/image.bbclass b/meta/classes/image.bbclass
index 8225980f7..461073153 100644
--- a/meta/classes/image.bbclass
+++ b/meta/classes/image.bbclass
@@ -1,18 +1,64 @@
inherit rootfs_${IMAGE_PKGTYPE}
+IMAGETEST ?= "dummy"
+inherit imagetest-${IMAGETEST}
+
+inherit gzipnative
+
LICENSE = "MIT"
PACKAGES = ""
-RDEPENDS += "${IMAGE_INSTALL}"
+RDEPENDS += "${IMAGE_INSTALL} ${LINGUAS_INSTALL} ${NORMAL_FEATURE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL}"
+RRECOMMENDS += "${NORMAL_FEATURE_INSTALL_OPTIONAL}"
INHIBIT_DEFAULT_DEPS = "1"
+# IMAGE_FEATURES may contain any available package group
+IMAGE_FEATURES ?= ""
+IMAGE_FEATURES[type] = "list"
+
+# rootfs bootstrap install
+ROOTFS_BOOTSTRAP_INSTALL = "${@base_contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
+
+# packages to install from features
+FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
+FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
+
+# packages to install from features, excluding dev/dbg/doc
+NORMAL_FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(normal_groups(d), d))}"
+NORMAL_FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(normal_groups(d), d))}"
+
+def normal_groups(d):
+ """Return all the IMAGE_FEATURES, with the exception of our special package groups"""
+ extras = set(['dev-pkgs', 'doc-pkgs', 'dbg-pkgs'])
+ features = set(oe.data.typed_value('IMAGE_FEATURES', d))
+ return features.difference(extras)
+
+def normal_pkgs_to_install(d):
+ import oe.packagedata
+
+ to_install = oe.data.typed_value('IMAGE_INSTALL', d)
+ features = normal_groups(d)
+ required = list(oe.packagegroup.required_packages(features, d))
+ optional = list(oe.packagegroup.optional_packages(features, d))
+ all_packages = to_install + required + optional
+
+ recipes = filter(None, [oe.packagedata.recipename(pkg, d) for pkg in all_packages])
+
+ return all_packages + recipes
+
+PACKAGE_GROUP_dbg-pkgs = "${@' '.join('%s-dbg' % pkg for pkg in normal_pkgs_to_install(d))}"
+PACKAGE_GROUP_dbg-pkgs[optional] = "1"
+PACKAGE_GROUP_dev-pkgs = "${@' '.join('%s-dev' % pkg for pkg in normal_pkgs_to_install(d))}"
+PACKAGE_GROUP_dev-pkgs[optional] = "1"
+PACKAGE_GROUP_doc-pkgs = "${@' '.join('%s-doc' % pkg for pkg in normal_pkgs_to_install(d))}"
+PACKAGE_GROUP_doc-pkgs[optional] = "1"
+
# "export IMAGE_BASENAME" not supported at this time
+IMAGE_INSTALL ?= ""
+IMAGE_INSTALL[type] = "list"
IMAGE_BASENAME[export] = "1"
-export PACKAGE_INSTALL ?= "${IMAGE_INSTALL}"
-PACKAGE_INSTALL_ATTEMPTONLY ?= ""
-
-# We need to recursively follow RDEPENDS and RRECOMMENDS for images
-do_rootfs[recrdeptask] += "do_deploy do_populate_staging"
+export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${ROOTFS_BOOTSTRAP_INSTALL} ${FEATURE_INSTALL}"
+PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
# Images are generally built explicitly, do not need to be part of world.
EXCLUDE_FROM_WORLD = "1"
@@ -23,21 +69,29 @@ PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
-do_rootfs[depends] += "makedevs-native:do_populate_staging fakeroot-native:do_populate_staging ldconfig-native:do_populate_staging"
+LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
+LDCONFIGDEPEND_libc-uclibc = ""
-python () {
- import bb
+do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
+do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
+
+IMAGE_TYPE_live = '${@base_contains("IMAGE_FSTYPES", "live", "live", "empty", d)}'
+inherit image-${IMAGE_TYPE_live}
+IMAGE_TYPE_vmdk = '${@base_contains("IMAGE_FSTYPES", "vmdk", "vmdk", "empty", d)}'
+inherit image-${IMAGE_TYPE_vmdk}
- deps = bb.data.getVarFlag('do_rootfs', 'depends', d) or ""
- for type in (bb.data.getVar('IMAGE_FSTYPES', d, True) or "").split():
- for dep in ((bb.data.getVar('IMAGE_DEPENDS_%s' % type, d) or "").split() or []):
- deps += " %s:do_populate_staging" % dep
- for dep in (bb.data.getVar('EXTRA_IMAGEDEPENDS', d, True) or "").split():
- deps += " %s:do_populate_staging" % dep
- bb.data.setVarFlag('do_rootfs', 'depends', deps, d)
+python () {
+ deps = d.getVarFlag('do_rootfs', 'depends') or ""
+ deps += imagetypes_getdepends(d)
+ for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ d.setVarFlag('do_rootfs', 'depends', deps)
- runtime_mapping_rename("PACKAGE_INSTALL", d)
- runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", d)
+    # If we don't do this, we try to run the mapping hooks while parsing,
+    # which is slow; bitbake should really provide something to let us
+    # know this...
+ if d.getVar('BB_WORKERCONTEXT', True) is not None:
+ runtime_mapping_rename("PACKAGE_INSTALL", d)
+ runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", d)
}
#
@@ -49,33 +103,19 @@ python () {
# is searched for in the BBPATH (same as the old version.)
#
def get_devtable_list(d):
- import bb
- devtable = bb.data.getVar('IMAGE_DEVICE_TABLE', d, 1)
+ devtable = d.getVar('IMAGE_DEVICE_TABLE', True)
if devtable != None:
return devtable
str = ""
- devtables = bb.data.getVar('IMAGE_DEVICE_TABLES', d, 1)
+ devtables = d.getVar('IMAGE_DEVICE_TABLES', True)
if devtables == None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
- str += " %s" % bb.which(bb.data.getVar('BBPATH', d, 1), devtable)
+ str += " %s" % bb.which(d.getVar('BBPATH', True), devtable)
return str
-def get_imagecmds(d):
- import bb
- cmds = "\n"
- old_overrides = bb.data.getVar('OVERRIDES', d, 0)
- for type in bb.data.getVar('IMAGE_FSTYPES', d, True).split():
- localdata = bb.data.createCopy(d)
- bb.data.setVar('OVERRIDES', '%s:%s' % (type, old_overrides), localdata)
- bb.data.update_data(localdata)
- cmd = "\t#Code for image type " + type + "\n"
- cmd += "\t${IMAGE_CMD_" + type + "}\n"
- cmd += "\tcd ${DEPLOY_DIR_IMAGE}/\n"
- cmd += "\trm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}." + type + "\n"
- cmd += "\tln -s ${IMAGE_NAME}.rootfs." + type + " ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}." + type + "\n\n"
- cmds += bb.data.expand(cmd, localdata)
- return cmds
+IMAGE_CLASSES ?= "image_types"
+inherit ${IMAGE_CLASSES}
IMAGE_POSTPROCESS_COMMAND ?= ""
MACHINE_POSTPROCESS_COMMAND ?= ""
@@ -84,23 +124,37 @@ ROOTFS_POSTPROCESS_COMMAND ?= ""
# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
-LINGUAS_INSTALL = "${@" ".join(map(lambda s: "locale-base-%s" % s, bb.data.getVar('IMAGE_LINGUAS', d, 1).split()))}"
+LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', True).split()))}"
+
+PSEUDO_PASSWD = "${IMAGE_ROOTFS}"
do_rootfs[nostamp] = "1"
do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[lockfiles] = "${IMAGE_ROOTFS}.lock"
+do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
do_build[nostamp] = "1"
# Must call real_do_rootfs() from inside here, rather than as a separate
# task, so that we have a single fakeroot context for the whole process.
+do_rootfs[umask] = "022"
+
fakeroot do_rootfs () {
- set -x
- rm -rf ${IMAGE_ROOTFS}
+ #set -x
+	# When using rpm incremental image generation, don't remove the rootfs
+ if [ "${INC_RPM_IMAGE_GEN}" != "1" -o "${IMAGE_PKGTYPE}" != "rpm" ]; then
+ rm -rf ${IMAGE_ROOTFS}
+ fi
+ rm -rf ${MULTILIB_TEMP_ROOTFS}
mkdir -p ${IMAGE_ROOTFS}
mkdir -p ${DEPLOY_DIR_IMAGE}
- if [ "${USE_DEVFS}" != "1" ]; then
+ cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOY_DIR_IMAGE}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt || true
+
+ # If "${IMAGE_ROOTFS}/dev" exists, then the device had been made by
+ # the previous build
+ if [ "${USE_DEVFS}" != "1" -a ! -r "${IMAGE_ROOTFS}/dev" ]; then
for devtable in ${@get_devtable_list(d)}; do
+			# Always return true since the device nodes may already
+			# exist when using incremental image generation
makedevs -r ${IMAGE_ROOTFS} -D $devtable
done
fi
@@ -109,9 +163,12 @@ fakeroot do_rootfs () {
insert_feed_uris
- # Run ldconfig on the image to create a valid cache
- # (new format for cross arch compatibility)
- ldconfig -r ${IMAGE_ROOTFS} -c new
+ if [ "x${LDCONFIGDEPEND}" != "x" ]; then
+ # Run ldconfig on the image to create a valid cache
+ # (new format for cross arch compatibility)
+ echo executing: ldconfig -r ${IMAGE_ROOTFS} -c new -v
+ ldconfig -r ${IMAGE_ROOTFS} -c new -v
+ fi
# (re)create kernel modules dependencies
# This part is done by kernel-module-* postinstall scripts but if image do
@@ -121,12 +178,11 @@ fakeroot do_rootfs () {
KERNEL_VERSION=`cat ${STAGING_KERNEL_DIR}/kernel-abiversion`
mkdir -p ${IMAGE_ROOTFS}/lib/modules/$KERNEL_VERSION
- ${TARGET_SYS}-depmod-2.6 -a -b ${IMAGE_ROOTFS} -F ${STAGING_KERNEL_DIR}/System.map-$KERNEL_VERSION $KERNEL_VERSION
+ depmod -a -b ${IMAGE_ROOTFS} -F ${STAGING_KERNEL_DIR}/System.map-$KERNEL_VERSION $KERNEL_VERSION
fi
${IMAGE_PREPROCESS_COMMAND}
- ROOTFS_SIZE=`du -ks ${IMAGE_ROOTFS}|awk '{size = ${IMAGE_EXTRA_SPACE} + $1; print (size > ${IMAGE_ROOTFS_SIZE} ? size : ${IMAGE_ROOTFS_SIZE}) }'`
${@get_imagecmds(d)}
${IMAGE_POSTPROCESS_COMMAND}
@@ -153,7 +209,6 @@ insert_feed_uris () {
}
log_check() {
- set +x
for target in $*
do
lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
@@ -162,28 +217,100 @@ log_check() {
if test -e "$lf_path"
then
- rootfs_${IMAGE_PKGTYPE}_log_check $target $lf_path
+ ${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
fi
echo "Logfile is clean"
done
+}
+
+MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|"
+MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
+MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
+
+multilib_generate_python_file() {
+ cat >${MULTILIB_CHECK_FILE} <<EOF
+import sys, os, os.path
+import re,filecmp
- set -x
+allow_rep=re.compile(re.sub("\|$","","${MULTILIBRE_ALLOW_REP}"))
+error_promt="Multilib check error:"
+
+files={}
+dirs=raw_input()
+for dir in dirs.split():
+	for root, subfolders, subfiles in os.walk(dir):
+ for file in subfiles:
+ item=os.path.join(root,file)
+ key=str(os.path.join("/",os.path.relpath(item,dir)))
+
+ valid=True;
+ if files.has_key(key):
+			#check whether the file is allowed to be replaced
+ if allow_rep.match(key):
+ valid=True
+ else:
+ if not filecmp.cmp(files[key],item):
+ valid=False
+ print("%s duplicate files %s %s is not the same\n" % (error_promt, item, files[key]))
+ sys.exit(1)
+
+ #pass the check, add to list
+ if valid:
+ files[key]=item
+EOF
}
-# set '*' as the rootpassword so the images
-# can decide if they want it or not
+multilib_sanity_check() {
+ multilib_generate_python_file
+ echo $@ | python ${MULTILIB_CHECK_FILE}
+}
+
+get_split_linguas() {
+ for translation in ${IMAGE_LINGUAS}; do
+ translation_split=$(echo ${translation} | awk -F '-' '{print $1}')
+ echo ${translation}
+ echo ${translation_split}
+ done | sort | uniq
+}
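+
+# For example (illustrative value): IMAGE_LINGUAS = "en-gb fr-fr" makes
+# get_split_linguas emit "en en-gb fr fr-fr", so both the full locale and
+# the language-only locale packages are searched for below.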
+
+rootfs_install_all_locales() {
+ # Generate list of installed packages for which additional locale packages might be available
+ INSTALLED_PACKAGES=`list_installed_packages | egrep -v -- "(-locale-|^locale-base-|-dev$|-doc$|^kernel|^glibc|^ttf|^task|^perl|^python)"`
+
+ # Generate a list of locale packages that exist
+ SPLIT_LINGUAS=`get_split_linguas`
+ PACKAGES_TO_INSTALL=""
+ for lang in $SPLIT_LINGUAS; do
+ for pkg in $INSTALLED_PACKAGES; do
+ existing_pkg=`rootfs_check_package_exists $pkg-locale-$lang`
+ if [ "$existing_pkg" != "" ]; then
+ PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL $existing_pkg"
+ fi
+ done
+ done
+
+ # Install the packages, if any
+ if [ "$PACKAGES_TO_INSTALL" != "" ]; then
+ rootfs_install_packages $PACKAGES_TO_INSTALL
+ fi
+
+ # Workaround for broken shell function dependencies
+ if false ; then
+ get_split_linguas
+ list_installed_packages
+ rootfs_check_package_exists
+ fi
+}
+# set '*' as the root password so the images
+# can decide if they want it or not
zap_root_password () {
sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
}
-create_etc_timestamp() {
- date +%2m%2d%2H%2M%Y >${IMAGE_ROOTFS}/etc/timestamp
-}
-
# Turn any symbolic /sbin/init link into a file
remove_init_link () {
if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
@@ -202,8 +329,10 @@ make_zimage_symlink_relative () {
write_image_manifest () {
rootfs_${IMAGE_PKGTYPE}_write_manifest
- rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
- ln -s ${IMAGE_NAME}.rootfs.manifest ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
+ if [ -n "${IMAGE_LINK_NAME}" ]; then
+ rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
+ ln -s ${IMAGE_NAME}.rootfs.manifest ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
+ fi
}
# Make login manager(s) enable automatic login.
@@ -215,7 +344,7 @@ set_image_autologin () {
 # Can be used to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
- date "+%m%d%H%M%Y" >${IMAGE_ROOTFS}/etc/timestamp
+ date -u +%4Y%2m%2d%2H%2M >${IMAGE_ROOTFS}/etc/timestamp
}
# Prevent X from being started
@@ -225,7 +354,29 @@ rootfs_no_x_startup () {
fi
}
-# export the zap_root_password, create_etc_timestamp and remote_init_link
-EXPORT_FUNCTIONS zap_root_password create_etc_timestamp remove_init_link do_rootfs make_zimage_symlink_relative set_image_autologin rootfs_update_timestamp rootfs_no_x_startup
+rootfs_trim_schemas () {
+ for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
+ do
+ # Need this in case no files exist
+ if [ -e $schema ]; then
+ oe-trim-schemas $schema > $schema.new
+ mv $schema.new $schema
+ fi
+ done
+}
+
+EXPORT_FUNCTIONS zap_root_password remove_init_link do_rootfs make_zimage_symlink_relative set_image_autologin rootfs_update_timestamp rootfs_no_x_startup
+
+do_fetch[noexec] = "1"
+do_unpack[noexec] = "1"
+do_patch[noexec] = "1"
+do_configure[noexec] = "1"
+do_compile[noexec] = "1"
+do_install[noexec] = "1"
+do_populate_sysroot[noexec] = "1"
+do_package[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
-addtask rootfs before do_build after do_install
+addtask rootfs before do_build
diff --git a/meta/classes/image_types.bbclass b/meta/classes/image_types.bbclass
new file mode 100644
index 000000000..55f122eb8
--- /dev/null
+++ b/meta/classes/image_types.bbclass
@@ -0,0 +1,232 @@
+def get_imagecmds(d):
+ cmds = "\n"
+ old_overrides = d.getVar('OVERRIDES', 0)
+
+ alltypes = d.getVar('IMAGE_FSTYPES', True).split()
+ types = []
+ ctypes = d.getVar('COMPRESSIONTYPES', True).split()
+ cimages = {}
+
+ # Filter out all the compressed images from types
+ for type in alltypes:
+ basetype = None
+ for ctype in ctypes:
+ if type.endswith("." + ctype):
+ basetype = type[:-len("." + ctype)]
+ if basetype not in types:
+ types.append(basetype)
+ if basetype not in cimages:
+ cimages[basetype] = []
+ if ctype not in cimages[basetype]:
+ cimages[basetype].append(ctype)
+ break
+ if not basetype and type not in types:
+ types.append(type)
+
+    # Live and VMDK images are processed via inherited bbclasses and do
+    # not get processed here.
+    # Both vmdk and live images depend on ext3, so ensure it is present.
+    # Note: we need to ensure ext3 is in alltypes, otherwise subimages may
+    # not contain ext3 and the .rootfs.ext3 file won't be created.
+ if "vmdk" in types:
+ if "ext3" not in types:
+ types.append("ext3")
+ if "ext3" not in alltypes:
+ alltypes.append("ext3")
+ types.remove("vmdk")
+ if "live" in types:
+ if "ext3" not in types:
+ types.append("ext3")
+ if "ext3" not in alltypes:
+ alltypes.append("ext3")
+ types.remove("live")
+
+ if d.getVar('IMAGE_LINK_NAME', True):
+ cmds += " rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.*"
+
+ for type in types:
+ ccmd = []
+ subimages = []
+ localdata = bb.data.createCopy(d)
+ localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
+ bb.data.update_data(localdata)
+ localdata.setVar('type', type)
+ if type in cimages:
+ for ctype in cimages[type]:
+ ccmd.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
+ subimages.append(type + "." + ctype)
+ if type not in alltypes:
+ ccmd.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
+ else:
+ subimages.append(type)
+ localdata.setVar('ccmd', "\n".join(ccmd))
+ localdata.setVar('subimages', " ".join(subimages))
+ cmd = localdata.getVar("IMAGE_CMD", True)
+ localdata.setVar('cmd', cmd)
+ cmds += "\n" + localdata.getVar("runimagecmd", True)
+ return cmds
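+
+# Worked example (illustrative): IMAGE_FSTYPES = "ext3 ext3.gz" collapses to
+# the single base type "ext3" with cimages["ext3"] = ["gz"]; the ext3 image
+# is generated once, COMPRESS_CMD_gz then produces the .ext3.gz variant, and
+# because "ext3" itself is in alltypes the uncompressed image is kept.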
+
+# The default alignment of the size of the rootfs is set to 1KiB. In case
+# you're using the SD card emulation of a QEMU system simulator you may
+# set this value to 2048 (2MiB alignment).
+IMAGE_ROOTFS_ALIGNMENT ?= "1"
+
+runimagecmd () {
+ # Image generation code for image type ${type}
+ # The base_size gets calculated:
+ # - initial size determined by `du -ks` of the IMAGE_ROOTFS
+ # - then multiplied by the IMAGE_OVERHEAD_FACTOR
+ # - then rounded up to IMAGE_ROOTFS_ALIGNMENT
+ # - finally tested against IMAGE_ROOTFS_SIZE
+ ROOTFS_SIZE=`du -ks ${IMAGE_ROOTFS}|awk '{base_size = $1 * ${IMAGE_OVERHEAD_FACTOR} + ${IMAGE_ROOTFS_ALIGNMENT} - 1; base_size -= base_size % ${IMAGE_ROOTFS_ALIGNMENT}; print ((base_size > ${IMAGE_ROOTFS_SIZE} ? base_size : ${IMAGE_ROOTFS_SIZE}) + ${IMAGE_ROOTFS_EXTRA_SPACE}) }'`
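+	# Worked example (illustrative numbers): du -ks = 10000 KiB,
+	# IMAGE_OVERHEAD_FACTOR = 1.3 and IMAGE_ROOTFS_ALIGNMENT = 1 give
+	# base_size = 13000; with IMAGE_ROOTFS_SIZE = 8192 and
+	# IMAGE_ROOTFS_EXTRA_SPACE = 0 the resulting ROOTFS_SIZE is 13000 KiB.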
+ ${cmd}
+ # Now create the needed compressed versions
+ cd ${DEPLOY_DIR_IMAGE}/
+ ${ccmd}
+ # And create the symlinks
+ if [ -n "${IMAGE_LINK_NAME}" ]; then
+ for type in ${subimages}; do
+ ln -s ${IMAGE_NAME}.rootfs.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
+ done
+ fi
+}
+
+def imagetypes_getdepends(d):
+ def adddep(depstr, deps):
+ for i in (depstr or "").split():
+ if i not in deps:
+ deps.append(i)
+
+ deps = []
+ ctypes = d.getVar('COMPRESSIONTYPES', True).split()
+ for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
+ if type == "vmdk" or type == "live":
+ type = "ext3"
+ basetype = type
+ for ctype in ctypes:
+ if type.endswith("." + ctype):
+ basetype = type[:-len("." + ctype)]
+ adddep(d.getVar("COMPRESS_DEPENDS_%s" % ctype, True), deps)
+ break
+ adddep(d.getVar('IMAGE_DEPENDS_%s' % basetype, True) , deps)
+
+ depstr = ""
+ for dep in deps:
+ depstr += " " + dep + ":do_populate_sysroot"
+ return depstr
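+
+# For example (illustrative): IMAGE_FSTYPES = "ext3.gz" resolves to base type
+# "ext3", so do_rootfs gains IMAGE_DEPENDS_ext3 (genext2fs-native and
+# e2fsprogs-native, see below) plus COMPRESS_DEPENDS_gz (empty), each with a
+# :do_populate_sysroot suffix.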
+
+
+XZ_COMPRESSION_LEVEL ?= "-e -9"
+XZ_INTEGRITY_CHECK ?= "crc32"
+
+IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 -n ${EXTRA_IMAGECMD}"
+IMAGE_CMD_sum.jffs2 = "${IMAGE_CMD_jffs2} && sumtool -i ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \
+ -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.sum.jffs2 -n ${EXTRA_IMAGECMD}"
+
+IMAGE_CMD_cramfs = "mkcramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
+
+IMAGE_CMD_ext2 () {
+ rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN} && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
+ genext2fs -b $ROOTFS_SIZE -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2
+ mv ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2
+ rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
+}
+
+IMAGE_CMD_ext3 () {
+ genext2fs -b $ROOTFS_SIZE -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext3
+ tune2fs -j ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext3
+}
+
+oe_mkext4fs () {
+ genext2fs -b $ROOTFS_SIZE -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} $1
+ tune2fs -O extents,uninit_bg,dir_index,has_journal $1
+ e2fsck -yfDC0 $1 || chk=$?
+ case $chk in
+ 0|1|2)
+ ;;
+ *)
+ return $chk
+ ;;
+ esac
+}
+
+IMAGE_CMD_ext4 () {
+ oe_mkext4fs ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext4
+}
+
+IMAGE_CMD_btrfs () {
+ mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+}
+
+IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD_squashfs-lzma = "mksquashfs-lzma ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzma ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD_tar = "cd ${IMAGE_ROOTFS} && tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar ."
+
+CPIO_TOUCH_INIT () {
+ if [ ! -L ${IMAGE_ROOTFS}/init ]
+ then
+ touch ${IMAGE_ROOTFS}/init
+ fi
+}
+IMAGE_CMD_cpio () {
+ ${CPIO_TOUCH_INIT}
+ cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
+}
+
+UBI_VOLNAME ?= "${MACHINE}-rootfs"
+
+IMAGE_CMD_ubi () {
+ echo \[ubifs\] > ubinize.cfg
+ echo mode=ubi >> ubinize.cfg
+ echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg
+ echo vol_id=0 >> ubinize.cfg
+ echo vol_type=dynamic >> ubinize.cfg
+ echo vol_name=${UBI_VOLNAME} >> ubinize.cfg
+ echo vol_flags=autoresize >> ubinize.cfg
+ mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS} && ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
+}
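+# MKUBIFS_ARGS and UBINIZE_ARGS are expected to come from the machine
+# configuration; e.g. (hypothetical flash geometry):
+# MKUBIFS_ARGS = "-m 2048 -e 129024 -c 2047"
+# UBINIZE_ARGS = "-m 2048 -p 128KiB -s 512"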
+IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
+
+EXTRA_IMAGECMD = ""
+
+inherit siteinfo
+JFFS2_ENDIANNESS ?= "${@base_conditional('SITEINFO_ENDIANNESS', 'le', '--little-endian', '--big-endian', d)}"
+JFFS2_ERASEBLOCK ?= "0x40000"
+EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers"
+
+# Change these if you want default genext2fs behavior (i.e. create minimal inode number)
+EXTRA_IMAGECMD_ext2 ?= "-i 8192"
+EXTRA_IMAGECMD_ext3 ?= "-i 8192"
+EXTRA_IMAGECMD_ext4 ?= "-i 8192"
+EXTRA_IMAGECMD_btrfs ?= ""
+
+IMAGE_DEPENDS = ""
+IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
+IMAGE_DEPENDS_sum.jffs2 = "mtd-utils-native"
+IMAGE_DEPENDS_cramfs = "cramfs-native"
+IMAGE_DEPENDS_ext2 = "genext2fs-native"
+IMAGE_DEPENDS_ext3 = "genext2fs-native e2fsprogs-native"
+IMAGE_DEPENDS_ext4 = "genext2fs-native e2fsprogs-native"
+IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
+IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
+IMAGE_DEPENDS_squashfs-lzma = "squashfs-lzma-tools-native"
+IMAGE_DEPENDS_ubi = "mtd-utils-native"
+IMAGE_DEPENDS_ubifs = "mtd-utils-native"
+
+# This variable is available to request which values are suitable for IMAGE_FSTYPES
+IMAGE_TYPES = "jffs2 sum.jffs2 cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs live squashfs squashfs-lzma ubi tar tar.gz tar.bz2 tar.xz cpio cpio.gz cpio.xz cpio.lzma vmdk"
+
+COMPRESSIONTYPES = "gz bz2 lzma xz"
+COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
+COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz"
+COMPRESS_CMD_bz2 = "bzip2 -f -k ${IMAGE_NAME}.rootfs.${type}"
+COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type}"
+COMPRESS_DEPENDS_lzma = "xz-native"
+COMPRESS_DEPENDS_gz = ""
+COMPRESS_DEPENDS_bz2 = ""
+COMPRESS_DEPENDS_xz = "xz-native"
+
+RUNNABLE_IMAGE_TYPES ?= "ext2 ext3"
+RUNNABLE_MACHINE_PATTERNS ?= "qemu"
+
+DEPLOYABLE_IMAGE_TYPES ?= "hddimg iso"
diff --git a/meta/classes/image_types_uboot.bbclass b/meta/classes/image_types_uboot.bbclass
new file mode 100644
index 000000000..07837b566
--- /dev/null
+++ b/meta/classes/image_types_uboot.bbclass
@@ -0,0 +1,23 @@
+inherit image_types kernel-arch
+
+oe_mkimage () {
+ mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
+ -d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
+}
+
+COMPRESSIONTYPES += "gz.u-boot bz2.u-boot lzma.u-boot u-boot"
+
+COMPRESS_DEPENDS_u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_u-boot = "oe_mkimage ${IMAGE_NAME}.rootfs.${type} none"
+
+COMPRESS_DEPENDS_gz.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_gz.u-boot = "${COMPRESS_CMD_gz}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.gz gzip"
+
+COMPRESS_DEPENDS_bz2.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_bz2.u-boot = "${COMPRESS_CMD_bz2}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.bz2 bzip2"
+
+COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
+COMPRESS_CMD_lzma.u-boot = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma"
+
+IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"
+
diff --git a/meta/classes/imagetest-dummy.bbclass b/meta/classes/imagetest-dummy.bbclass
new file mode 100644
index 000000000..bcacae604
--- /dev/null
+++ b/meta/classes/imagetest-dummy.bbclass
@@ -0,0 +1 @@
+# dummy testclass file
diff --git a/meta/classes/imagetest-qemu.bbclass b/meta/classes/imagetest-qemu.bbclass
new file mode 100644
index 000000000..d56b44b5c
--- /dev/null
+++ b/meta/classes/imagetest-qemu.bbclass
@@ -0,0 +1,223 @@
+# Test related variables
+# By default, TEST_DIR is created under WORKDIR
+TEST_DIR ?= "${WORKDIR}/qemuimagetest"
+TEST_LOG ?= "${LOG_DIR}/qemuimagetests"
+TEST_RESULT ?= "${TEST_DIR}/result"
+TEST_TMP ?= "${TEST_DIR}/tmp"
+TEST_SCEN ?= "sanity"
+TEST_STATUS ?= "${TEST_TMP}/status"
+TARGET_IPSAVE ?= "${TEST_TMP}/target_ip"
+TEST_SERIALIZE ?= "1"
+
+python do_qemuimagetest() {
+ qemuimagetest_main(d)
+}
+addtask qemuimagetest before do_build after do_rootfs
+do_qemuimagetest[nostamp] = "1"
+do_qemuimagetest[depends] += "qemu-native:do_populate_sysroot"
+
+python do_qemuimagetest_standalone() {
+ qemuimagetest_main(d)
+}
+addtask qemuimagetest_standalone
+do_qemuimagetest_standalone[nostamp] = "1"
+do_qemuimagetest_standalone[depends] += "qemu-native:do_populate_sysroot"
+
+def qemuimagetest_main(d):
+ import sys
+ import re
+ import os
+ import shutil
+
+ """
+ Test Controller for automated testing.
+ """
+
+ casestr = re.compile(r'(?P<scen>\w+\b):(?P<case>\S+$)')
+ resultstr = re.compile(r'\s*(?P<case>\w+)\s*(?P<pass>\d+)\s*(?P<fail>\d+)\s*(?P<noresult>\d+)')
+ machine = d.getVar('MACHINE', True)
+ pname = d.getVar('PN', True)
+
+ """function to save test cases running status"""
+ def teststatus(test, status, index, length):
+ test_status = d.getVar('TEST_STATUS', True)
+ if not os.path.exists(test_status):
+            raise bb.build.FuncFailed("No test status file exists under TEST_TMP")
+
+ f = open(test_status, "w")
+ f.write("\t%-15s%-15s%-15s%-15s\n" % ("Case", "Status", "Number", "Total"))
+ f.write("\t%-15s%-15s%-15s%-15s\n" % (case, status, index, length))
+ f.close()
+
+ """funtion to run each case under scenario"""
+ def runtest(scen, case, fulltestpath):
+ resultpath = d.getVar('TEST_RESULT', True)
+ tmppath = d.getVar('TEST_TMP', True)
+
+ """initialize log file for testcase"""
+ logpath = d.getVar('TEST_LOG', True)
+ bb.utils.mkdirhier("%s/%s" % (logpath, scen))
+ caselog = os.path.join(logpath, "%s/log_%s.%s" % (scen, case, d.getVar('DATETIME', True)))
+ os.system("touch %s" % caselog)
+
+ """export TEST_TMP, TEST_RESULT, DEPLOY_DIR and QEMUARCH"""
+ os.environ["PATH"] = d.getVar("PATH", True)
+ os.environ["TEST_TMP"] = tmppath
+ os.environ["TEST_RESULT"] = resultpath
+ os.environ["DEPLOY_DIR"] = d.getVar("DEPLOY_DIR", True)
+ os.environ["QEMUARCH"] = machine
+ os.environ["QEMUTARGET"] = pname
+ os.environ["DISPLAY"] = d.getVar("DISPLAY", True)
+ os.environ["COREBASE"] = d.getVar("COREBASE", True)
+ os.environ["TOPDIR"] = d.getVar("TOPDIR", True)
+ os.environ["OE_TMPDIR"] = d.getVar("TMPDIR", True)
+ os.environ["TEST_STATUS"] = d.getVar("TEST_STATUS", True)
+ os.environ["TARGET_IPSAVE"] = d.getVar("TARGET_IPSAVE", True)
+ os.environ["TEST_SERIALIZE"] = d.getVar("TEST_SERIALIZE", True)
+ os.environ["SDK_NAME"] = d.getVar("SDK_NAME", True)
+
+ """run Test Case"""
+ bb.note("Run %s test in scenario %s" % (case, scen))
+ os.system("%s" % fulltestpath)
+
+ """function to check testcase list and remove inappropriate cases"""
+ def check_list(list):
+ final_list = []
+ for test in list:
+ (scen, case, fullpath) = test
+
+ """Skip rpm/zypper if package_rpm not set for PACKAGE_CLASSES"""
+ if case.find("zypper") != -1 or case.find("rpm") != -1:
+ if d.getVar("PACKAGE_CLASSES", True).find("rpm", 0, 11) == -1:
+ bb.note("skip rpm/zypper cases since package_rpm not set in PACKAGE_CLASSES")
+ continue
+ else:
+ final_list.append((scen, case, fullpath))
+ else:
+ final_list.append((scen, case, fullpath))
+
+ if not final_list:
+ raise bb.build.FuncFailed("There is no suitable testcase for this target")
+
+ return final_list
+
+ """Generate testcase list in runtime"""
+ def generate_list(testlist):
+ list = []
+ final_list = []
+ if len(testlist) == 0:
+ raise bb.build.FuncFailed("No testcase defined in TEST_SCEN")
+
+ """check testcase folder and add case list according to TEST_SCEN"""
+ for item in testlist.split(" "):
+ n = casestr.match(item)
+ if n:
+ item = n.group('scen')
+ casefile = n.group('case')
+ for dir in d.getVar("QEMUIMAGETESTS", True).split():
+ fulltestcase = os.path.join(dir, item, casefile)
+ if not os.path.isfile(fulltestcase):
+ raise bb.build.FuncFailed("Testcase %s not found" % fulltestcase)
+ list.append((item, casefile, fulltestcase))
+ else:
+ for dir in d.getVar("QEMUIMAGETESTS", True).split():
+ scenlist = os.path.join(dir, "scenario", machine, pname)
+ if not os.path.isfile(scenlist):
+ raise bb.build.FuncFailed("No scenario list file named %s found" % scenlist)
+
+ f = open(scenlist, "r")
+ for line in f:
+ if item != line.split()[0]:
+ continue
+ else:
+ casefile = line.split()[1]
+
+ fulltestcase = os.path.join(dir, item, casefile)
+ if not os.path.isfile(fulltestcase):
+ raise bb.build.FuncFailed("Testcase %s not found" % fulltestcase)
+ list.append((item, casefile, fulltestcase))
+ final_list = check_list(list)
+ return final_list
+
+ """Clean tmp folder for testing"""
+ def clean_tmp():
+ tmppath = d.getVar('TEST_TMP', True)
+
+ if os.path.isdir(tmppath):
+ for f in os.listdir(tmppath):
+ tmpfile = os.path.join(tmppath, f)
+ if os.path.isfile(tmpfile):
+ os.remove(tmpfile)
+ elif os.path.isdir(tmpfile):
+ shutil.rmtree(tmpfile, True)
+
+ """Before running testing, clean temp folder first"""
+ clean_tmp()
+
+ """check testcase folder and create test log folder"""
+ testpath = d.getVar('TEST_DIR', True)
+ bb.utils.mkdirhier(testpath)
+
+ logpath = d.getVar('TEST_LOG', True)
+ bb.utils.mkdirhier(logpath)
+
+ tmppath = d.getVar('TEST_TMP', True)
+ bb.utils.mkdirhier(tmppath)
+
+ """initialize test status file"""
+ test_status = d.getVar('TEST_STATUS', True)
+ if os.path.exists(test_status):
+ os.remove(test_status)
+ os.system("touch %s" % test_status)
+
+ """initialize result file"""
+ resultpath = d.getVar('TEST_RESULT', True)
+ bb.utils.mkdirhier(resultpath)
+ resultfile = os.path.join(resultpath, "testresult.%s" % d.getVar('DATETIME', True))
+ sresultfile = os.path.join(resultpath, "testresult.log")
+
+ machine = d.getVar('MACHINE', True)
+
+ if os.path.exists(sresultfile):
+ os.remove(sresultfile)
+ os.system("touch %s" % resultfile)
+ os.symlink(resultfile, sresultfile)
+ f = open(sresultfile, "a")
+ f.write("\tTest Result for %s %s\n" % (machine, pname))
+ f.write("\t%-15s%-15s%-15s%-15s\n" % ("Testcase", "PASS", "FAIL", "NORESULT"))
+ f.close()
+
+ """generate pre-defined testcase list"""
+ testlist = d.getVar('TEST_SCEN', True)
+ fulllist = generate_list(testlist)
+
+ """Begin testing"""
+ for index,test in enumerate(fulllist):
+ (scen, case, fullpath) = test
+ teststatus(case, "running", index, (len(fulllist) - 1))
+ runtest(scen, case, fullpath)
+ teststatus(case, "finished", index, (len(fulllist) - 1))
+
+ """Print Test Result"""
+ ret = 0
+ f = open(sresultfile, "r")
+ for line in f:
+ m = resultstr.match(line)
+ if m:
+ if m.group('fail') == "1":
+ ret = 1
+ elif m.group('noresult') == "1":
+ ret = 2
+ line = line.strip('\n')
+ bb.note(line)
+ else:
+ line = line.strip('\n')
+ bb.note(line)
+ f.close()
+
+ """Clean temp files for testing"""
+ clean_tmp()
+
+ if ret != 0:
+        raise bb.build.FuncFailed("Some testcases failed, please check the test result and test log!")
+
diff --git a/meta/classes/insane.bbclass b/meta/classes/insane.bbclass
index 2b0c28477..8c4a83a34 100644
--- a/meta/classes/insane.bbclass
+++ b/meta/classes/insane.bbclass
@@ -11,6 +11,10 @@
# -Check if packages contains .debug directories or .so files
# where they should be in -dev or -dbg
# -Check if config.log contains traces to broken autoconf tests
+# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
+# into exec_prefix
+# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
+# files under exec_prefix
#
@@ -19,9 +23,14 @@
# The package.bbclass can help us here.
#
inherit package
-PACKAGE_DEPENDS += "pax-utils-native desktop-file-utils-native"
+PACKAGE_DEPENDS += "pax-utils-native ${QADEPENDS}"
PACKAGEFUNCS += " do_package_qa "
+# unsafe-references-in-binaries requires prelink-rtld from
+# prelink-native, but we don't want this DEPENDS for -native builds
+QADEPENDS = "prelink-native"
+QADEPENDS_virtclass-native = ""
+QADEPENDS_virtclass-nativesdk = ""
#
# dictionary for elf headers
@@ -31,328 +40,457 @@ PACKAGEFUNCS += " do_package_qa "
# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
def package_qa_get_machine_dict():
return {
+ "darwin9" : {
+ "arm" : (40, 0, 0, True, 32),
+ },
"linux" : {
- "arm" : (40, 97, 0, True, True),
- "armeb": (40, 97, 0, False, True),
- "powerpc": (20, 0, 0, False, True),
- "i386": ( 3, 0, 0, True, True),
- "i486": ( 3, 0, 0, True, True),
- "i586": ( 3, 0, 0, True, True),
- "i686": ( 3, 0, 0, True, True),
- "x86_64": (62, 0, 0, True, False),
- "ia64": (50, 0, 0, True, False),
- "alpha": (36902, 0, 0, True, False),
- "hppa": (15, 3, 0, False, True),
- "m68k": ( 4, 0, 0, False, True),
- "mips": ( 8, 0, 0, False, True),
- "mipsel": ( 8, 0, 0, True, True),
- "s390": (22, 0, 0, False, True),
- "sh4": (42, 0, 0, True, True),
- "sparc": ( 2, 0, 0, False, True),
+ "arm" : (40, 97, 0, True, 32),
+ "armeb": (40, 97, 0, False, 32),
+ "powerpc": (20, 0, 0, False, 32),
+ "powerpc64": (21, 0, 0, False, 64),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": (62, 0, 0, True, 64),
+ "ia64": (50, 0, 0, True, 64),
+ "alpha": (36902, 0, 0, True, 64),
+ "hppa": (15, 3, 0, False, 32),
+ "m68k": ( 4, 0, 0, False, 32),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "s390": (22, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+ "sparc": ( 2, 0, 0, False, 32),
},
"linux-uclibc" : {
- "arm" : ( 40, 97, 0, True, True),
- "armeb": ( 40, 97, 0, False, True),
- "powerpc": ( 20, 0, 0, False, True),
- "i386": ( 3, 0, 0, True, True),
- "i486": ( 3, 0, 0, True, True),
- "i586": ( 3, 0, 0, True, True),
- "i686": ( 3, 0, 0, True, True),
- "mipsel": ( 8, 0, 0, True, True),
- "avr32": (6317, 0, 0, False, True),
+ "arm" : ( 40, 97, 0, True, 32),
+ "armeb": ( 40, 97, 0, False, 32),
+ "powerpc": ( 20, 0, 0, False, 32),
+ "i386": ( 3, 0, 0, True, 32),
+ "i486": ( 3, 0, 0, True, 32),
+ "i586": ( 3, 0, 0, True, 32),
+ "i686": ( 3, 0, 0, True, 32),
+ "x86_64": ( 62, 0, 0, True, 64),
+ "mips": ( 8, 0, 0, False, 32),
+ "mipsel": ( 8, 0, 0, True, 32),
+ "avr32": (6317, 0, 0, False, 32),
+ "sh4": (42, 0, 0, True, 32),
+
},
"uclinux-uclibc" : {
- "bfin": ( 106, 0, 0, True, True),
+ "bfin": ( 106, 0, 0, True, 32),
},
"linux-gnueabi" : {
- "arm" : (40, 0, 0, True, True),
- "armeb" : (40, 0, 0, False, True),
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
},
- "linux-uclibcgnueabi" : {
- "arm" : (40, 0, 0, True, True),
- "armeb" : (40, 0, 0, False, True),
+ "linux-uclibceabi" : {
+ "arm" : (40, 0, 0, True, 32),
+ "armeb" : (40, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnuspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-uclibcspe" : {
+ "powerpc": (20, 0, 0, False, 32),
+ },
+ "linux-gnu" : {
+ "microblaze": (47787, 0, 0, False, 32),
+ "microblazeel": (47787, 0, 0, True, 32),
+ },
+ "linux-gnux32" : {
+ "x86_64": (62, 0, 0, True, 32),
},
-
}
-# factory for a class, embedded in a method
-def package_qa_get_elf(path, bits32):
- class ELFFile:
- EI_NIDENT = 16
-
- EI_CLASS = 4
- EI_DATA = 5
- EI_VERSION = 6
- EI_OSABI = 7
- EI_ABIVERSION = 8
- # possible values for EI_CLASS
- ELFCLASSNONE = 0
- ELFCLASS32 = 1
- ELFCLASS64 = 2
+# Tests not currently enabled by default: "desktop"
+WARN_QA ?= "ldflags useless-rpaths rpaths unsafe-references-in-binaries unsafe-references-in-scripts staticdev"
+ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch la2 pkgconfig la perms"
- # possible value for EI_VERSION
- EV_CURRENT = 1
+def package_qa_clean_path(path,d):
+ """ Remove the common prefix from the path. In this case it is the TMPDIR"""
+ return path.replace(d.getVar('TMPDIR',True),"")
- # possible values for EI_DATA
- ELFDATANONE = 0
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
+def package_qa_write_error(error, d):
+ logfile = d.getVar('QA_LOGFILE', True)
+ if logfile:
+ p = d.getVar('P', True)
+ f = file( logfile, "a+")
+ print >> f, "%s: %s" % (p, error)
+ f.close()
- def my_assert(self, expectation, result):
- if not expectation == result:
- #print "'%x','%x' %s" % (ord(expectation), ord(result), self.name)
- raise Exception("This does not work as expected")
+def package_qa_handle_error(error_class, error_msg, d):
+ package_qa_write_error(error_msg, d)
+ if error_class in (d.getVar("ERROR_QA", True) or "").split():
+ bb.error("QA Issue: %s" % error_msg)
+ return False
+ else:
+ bb.warn("QA Issue: %s" % error_msg)
+ return True
- def __init__(self, name):
- self.name = name
+QAPATHTEST[rpaths] = "package_qa_check_rpath"
+def package_qa_check_rpath(file,name, d, elf, messages):
+ """
+ Check for dangerous RPATHs
+ """
+ if not elf:
+ return
- def open(self):
- self.file = file(self.name, "r")
- self.data = self.file.read(ELFFile.EI_NIDENT+4)
+ scanelf = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'scanelf')
+ bad_dirs = [d.getVar('TMPDIR', True) + "/work", d.getVar('STAGING_DIR_TARGET', True)]
+ bad_dir_test = d.getVar('TMPDIR', True)
+ if not os.path.exists(scanelf):
+ bb.fatal("Can not check RPATH, scanelf (part of pax-utils-native) not found")
- self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
- self.my_assert(self.data[0], chr(0x7f) )
- self.my_assert(self.data[1], 'E')
- self.my_assert(self.data[2], 'L')
- self.my_assert(self.data[3], 'F')
- if bits32 :
- self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
- else:
- self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
- self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
+ if not bad_dirs[0] in d.getVar('WORKDIR', True):
+ bb.fatal("This class assumed that WORKDIR is ${TMPDIR}/work... Not doing any check")
- self.sex = self.data[ELFFile.EI_DATA]
- if self.sex == chr(ELFFile.ELFDATANONE):
- raise Exception("self.sex == ELFDATANONE")
- elif self.sex == chr(ELFFile.ELFDATA2LSB):
- self.sex = "<"
- elif self.sex == chr(ELFFile.ELFDATA2MSB):
- self.sex = ">"
- else:
- raise Exception("Unknown self.sex")
+ output = os.popen("%s -B -F%%r#F '%s'" % (scanelf,file))
+ txt = output.readline().split()
+ for line in txt:
+ for dir in bad_dirs:
+ if dir in line:
+ messages.append("package %s contains bad RPATH %s in file %s" % (name, line, file))
- def osAbi(self):
- return ord(self.data[ELFFile.EI_OSABI])
+QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
+def package_qa_check_useless_rpaths(file, name, d, elf, messages):
+ """
+ Check for RPATHs that are useless but not dangerous
+ """
+ if not elf:
+ return
- def abiVersion(self):
- return ord(self.data[ELFFile.EI_ABIVERSION])
+ objdump = d.getVar('OBJDUMP', True)
+ env_path = d.getVar('PATH', True)
- def isLittleEndian(self):
- return self.sex == "<"
+ libdir = d.getVar("libdir", True)
+ base_libdir = d.getVar("base_libdir", True)
- def isBigEngian(self):
- return self.sex == ">"
+ import re
+ rpath_re = re.compile("\s+RPATH\s+(.*)")
+ for line in os.popen("LC_ALL=C PATH=%s %s -p '%s' 2> /dev/null" % (env_path, objdump, file), "r"):
+ m = rpath_re.match(line)
+ if m:
+ rpath = m.group(1)
+ if rpath == libdir or rpath == base_libdir:
+ # The dynamic linker searches both these places anyway. There is no point in
+ # looking there again.
+ messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
- def machine(self):
- """
- We know the sex stored in self.sex and we
- know the position
- """
- import struct
- (a,) = struct.unpack(self.sex+"H", self.data[18:20])
- return a
+QAPATHTEST[dev-so] = "package_qa_check_dev"
+def package_qa_check_dev(path, name, d, elf, messages):
+ """
+ Check for ".so" library symlinks in non-dev packages
+ """
- return ELFFile(path)
+ if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-nativesdk") and path.endswith(".so") and os.path.islink(path):
+ messages.append("non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
+QAPATHTEST[staticdev] = "package_qa_check_staticdev"
+def package_qa_check_staticdev(path, name, d, elf, messages):
+ """
+ Check for ".a" library in non-staticdev packages
+ There are a number of exceptions to this rule, -pic packages can contain
+ static libraries, the _nonshared.a belong with their -dev packages and
+ libgcc.a, libgcov.a will be skipped in their packages
+ """
-# Known Error classes
-# 0 - non dev contains .so
-# 1 - package contains a dangerous RPATH
-# 2 - package depends on debug package
-# 3 - non dbg contains .so
-# 4 - wrong architecture
-# 5 - .la contains installed=yes or reference to the workdir
-# 6 - .pc contains reference to /usr/include or workdir
-# 7 - the desktop file is not valid
-# 8 - .la contains reference to the workdir
+ if not name.endswith("-pic") and not name.endswith("-staticdev") and path.endswith(".a") and not path.endswith("_nonshared.a"):
+ messages.append("non -staticdev package contains static .a library: %s path '%s'" % \
+ (name, package_qa_clean_path(path,d)))
-def package_qa_clean_path(path,d):
- """ Remove the common prefix from the path. In this case it is the TMPDIR"""
- import bb
- return path.replace(bb.data.getVar('TMPDIR',d,True),"")
-
-def package_qa_make_fatal_error(error_class, name, path,d):
+QAPATHTEST[debug-files] = "package_qa_check_dbg"
+def package_qa_check_dbg(path, name, d, elf, messages):
"""
- decide if an error is fatal
-
- TODO: Load a whitelist of known errors
+ Check for ".debug" files or directories outside of the dbg package
"""
- return not error_class in [0, 5, 7, 8, 9]
-def package_qa_write_error(error_class, name, path, d):
+ if not "-dbg" in name:
+ if '.debug' in path.split(os.path.sep):
+ messages.append("non debug package contains .debug directory: %s path %s" % \
+ (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[perms] = "package_qa_check_perm"
+def package_qa_check_perm(path,name,d, elf, messages):
"""
- Log the error
+ Check the permission of files
"""
- import bb, os
+ return
- ERROR_NAMES =[
- "non dev contains .so",
- "package contains RPATH",
- "package depends on debug package",
- "non dbg contains .debug",
- "wrong architecture",
- "evil hides inside the .la",
- "evil hides inside the .pc",
- "the desktop file is not valid",
- ".la contains reference to the workdir",
- "package contains reference to tmpdir paths",
- ]
+QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
+def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
+ """
+ Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
+ """
+ if unsafe_references_skippable(path, name, d):
+ return
- log_path = os.path.join( bb.data.getVar('T', d, True), "log.qa_package" )
- f = file( log_path, "a+")
- print >> f, "%s, %s, %s" % \
- (ERROR_NAMES[error_class], name, package_qa_clean_path(path,d))
- f.close()
+ if elf:
+ import subprocess as sub
+ pn = d.getVar('PN', True)
- logfile = bb.data.getVar('QA_LOGFILE', d, True)
- if logfile:
- p = bb.data.getVar('P', d, True)
- f = file( logfile, "a+")
- print >> f, "%s, %s, %s, %s" % \
- (p, ERROR_NAMES[error_class], name, package_qa_clean_path(path,d))
- f.close()
+ exec_prefix = d.getVar('exec_prefix', True)
+ sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
+ sysroot_path_usr = sysroot_path + exec_prefix
-def package_qa_handle_error(error_class, error_msg, name, path, d):
- import bb
- fatal = package_qa_make_fatal_error(error_class, name, path, d)
- if fatal:
- bb.error("QA Issue: %s" % error_msg)
- else:
- # Use bb.warn here when it works
- bb.note("QA Issue: %s" % error_msg)
- package_qa_write_error(error_class, name, path, d)
+ try:
+ ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
+ except bb.process.CmdError:
+ error_msg = pn + ": prelink-rtld aborted when processing %s" % path
+ package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
+ return False
- return not fatal
+ if sysroot_path_usr in ldd_output:
+ error_msg = pn + ": %s links to something under exec_prefix" % path
+ package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
+ error_msg = "ldd reports: %s" % ldd_output
+ package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
+ return False
-def package_qa_check_rpath(file,name,d):
- """
- Check for dangerous RPATHs
- """
- import bb, os
- sane = True
- scanelf = os.path.join(bb.data.getVar('STAGING_BINDIR_NATIVE',d,True),'scanelf')
- bad_dir = bb.data.getVar('TMPDIR', d, True) + "/work"
- bad_dir_test = bb.data.getVar('TMPDIR', d, True)
- if not os.path.exists(scanelf):
- bb.fatal("Can not check RPATH, scanelf (part of pax-utils-native) not found")
+QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
+def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
+ """
+ Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
+ """
+ if unsafe_references_skippable(path, name, d):
+ return
- if not bad_dir in bb.data.getVar('WORKDIR', d, True):
- bb.fatal("This class assumed that WORKDIR is ${TMPDIR}/work... Not doing any check")
+ if not elf:
+ import stat
+ pn = d.getVar('PN', True)
- output = os.popen("%s -B -F%%r#F '%s'" % (scanelf,file))
- txt = output.readline().split()
- for line in txt:
- if bad_dir in line:
- error_msg = "package %s contains bad RPATH %s in file %s" % (name, line, file)
- sane = package_qa_handle_error(1, error_msg, name, file, d)
+ # Ensure we're checking an executable script
+ statinfo = os.stat(path)
+ if bool(statinfo.st_mode & stat.S_IXUSR):
+ # grep shell scripts for possible references to /exec_prefix/
+ exec_prefix = d.getVar('exec_prefix', True)
+ statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
+ if os.system(statement) == 0:
+ error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
+ package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
+ error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
+ package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
- return sane
+def unsafe_references_skippable(path, name, d):
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
+ return True
-def package_qa_check_devdbg(path, name,d):
- """
- Check for debug remains inside the binary or
- non dev packages containing
- """
+ if "-dbg" in name or "-dev" in name:
+ return True
- import bb, os
- sane = True
+ # Other package names to skip:
+ if name.startswith("kernel-module-"):
+ return True
- if not "-dev" in name:
- if path[-3:] == ".so" and os.path.islink(path):
- error_msg = "non -dev package contains symlink .so: %s path '%s'" % \
- (name, package_qa_clean_path(path,d))
- sane = package_qa_handle_error(0, error_msg, name, path, d)
+ # Skip symlinks
+ if os.path.islink(path):
+ return True
- if not "-dbg" in name:
- if '.debug' in path:
- error_msg = "non debug package contains .debug directory: %s path %s" % \
- (name, package_qa_clean_path(path,d))
- sane = package_qa_handle_error(3, error_msg, name, path, d)
+ # Skip unusual rootfs layouts which make these tests irrelevant
+ exec_prefix = d.getVar('exec_prefix', True)
+ if exec_prefix == "":
+ return True
- return sane
+ pkgdest = d.getVar('PKGDEST', True)
+ pkgdest = pkgdest + "/" + name
+ pkgdest = os.path.abspath(pkgdest)
+ base_bindir = pkgdest + d.getVar('base_bindir', True)
+ base_sbindir = pkgdest + d.getVar('base_sbindir', True)
+ base_libdir = pkgdest + d.getVar('base_libdir', True)
+ bindir = pkgdest + d.getVar('bindir', True)
+ sbindir = pkgdest + d.getVar('sbindir', True)
+ libdir = pkgdest + d.getVar('libdir', True)
-def package_qa_check_perm(path,name,d):
- """
- Check the permission of files
- """
- sane = True
- return sane
+ if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
+ return True
+
+ # Skip files not in base_[bindir|sbindir|libdir]
+ path = os.path.abspath(path)
+ if not (base_bindir in path or base_sbindir in path or base_libdir in path):
+ return True
+
+ return False
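
For reference, the skippable test above reduces to a path-layout comparison: if base_bindir/base_sbindir/base_libdir already coincide with bindir/sbindir/libdir, the unsafe-references checks tell us nothing. A standalone Python sketch with illustrative values (not taken from this patch):

    # Sketch of the layout short-circuit; the dict values are made up.
    def layout_makes_check_irrelevant(pkgdest, dirs):
        base = [pkgdest + dirs[k] for k in ("base_bindir", "base_sbindir", "base_libdir")]
        usr = [pkgdest + dirs[k] for k in ("bindir", "sbindir", "libdir")]
        return base == usr

    split = {"base_bindir": "/bin", "base_sbindir": "/sbin", "base_libdir": "/lib",
             "bindir": "/usr/bin", "sbindir": "/usr/sbin", "libdir": "/usr/lib"}
    print(layout_makes_check_irrelevant("/pkgdest/foo", split))   # False: checks stay active

    merged = dict(split, bindir="/bin", sbindir="/sbin", libdir="/lib")
    print(layout_makes_check_irrelevant("/pkgdest/foo", merged))  # True: file is skipped
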
-def package_qa_check_arch(path,name,d):
+QAPATHTEST[arch] = "package_qa_check_arch"
+def package_qa_check_arch(path,name,d, elf, messages):
"""
Check if archs are compatible
"""
- import bb, os
- sane = True
- target_os = bb.data.getVar('TARGET_OS', d, True)
- target_arch = bb.data.getVar('TARGET_ARCH', d, True)
+ if not elf:
+ return
+
+ target_os = d.getVar('TARGET_OS', True)
+ target_arch = d.getVar('TARGET_ARCH', True)
+ provides = d.getVar('PROVIDES', True)
+ bpn = d.getVar('BPN', True)
# FIXME: Cross package confuse this check, so just skip them
- if bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d):
- return True
+ for s in ['cross', 'nativesdk', 'cross-canadian']:
+ if bb.data.inherits_class(s, d):
+ return
# avoid following links to /usr/bin (e.g. on udev builds)
# we will check the files pointed to anyway...
if os.path.islink(path):
- return True
+ return
#if this will throw an exception, then fix the dict above
- (machine, osabi, abiversion, littleendian, bits32) \
+ (machine, osabi, abiversion, littleendian, bits) \
= package_qa_get_machine_dict()[target_os][target_arch]
- elf = package_qa_get_elf(path, bits32)
- try:
- elf.open()
- except:
- return True
# Check the architecture and endiannes of the binary
- if not machine == elf.machine():
- error_msg = "Architecture did not match (%d to %d) on %s" % \
- (machine, elf.machine(), package_qa_clean_path(path,d))
- sane = package_qa_handle_error(4, error_msg, name, path, d)
+ if not ((machine == elf.machine()) or \
+ (("virtual/kernel" in provides) and (target_os == "linux-gnux32"))):
+ messages.append("Architecture did not match (%d to %d) on %s" % \
+ (machine, elf.machine(), package_qa_clean_path(path,d)))
+ elif not ((bits == elf.abiSize()) or \
+ (("virtual/kernel" in provides) and (target_os == "linux-gnux32"))):
+ messages.append("Bit size did not match (%d to %d) %s on %s" % \
+ (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
elif not littleendian == elf.isLittleEndian():
- error_msg = "Endiannes did not match (%d to %d) on %s" % \
- (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d))
- sane = package_qa_handle_error(4, error_msg, name, path, d)
-
- return sane
+ messages.append("Endiannes did not match (%d to %d) on %s" % \
+ (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
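
The (machine, osabi, abiversion, littleendian, bits) tuple unpacked above comes from package_qa_get_machine_dict(). A hedged sketch of the comparison chain, with two illustrative entries rather than a copy of the real table:

    # Illustrative entries only; not an authoritative copy of
    # package_qa_get_machine_dict().
    machine_dict = {
        "linux": {
            "arm":    (40, 97, 0, True, 32),   # EM_ARM
            "x86_64": (62,  0, 0, True, 64),   # EM_X86_64
        },
    }

    target_os, target_arch = "linux", "arm"
    machine, osabi, abiversion, littleendian, bits = machine_dict[target_os][target_arch]

    # elf.machine(), elf.abiSize() and elf.isLittleEndian() would come from
    # oe.qa.ELFFile; fake one mismatching binary here.
    elf_machine, elf_bits, elf_le = 62, 64, True

    messages = []
    if machine != elf_machine:
        messages.append("Architecture did not match (%d to %d)" % (machine, elf_machine))
    elif bits != elf_bits:
        messages.append("Bit size did not match (%d to %d)" % (bits, elf_bits))
    elif littleendian != elf_le:
        messages.append("Endianness did not match (%d to %d)" % (littleendian, elf_le))
    print(messages)
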
-def package_qa_check_desktop(path, name, d):
+QAPATHTEST[desktop] = "package_qa_check_desktop"
+def package_qa_check_desktop(path, name, d, elf, messages):
"""
Run all desktop files through desktop-file-validate.
"""
- import bb, os
- sane = True
if path.endswith(".desktop"):
- desktop_file_validate = os.path.join(bb.data.getVar('STAGING_BINDIR_NATIVE',d,True),'desktop-file-validate')
+ desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
- sane = package_qa_handle_error(7, l.strip(), name, path, d)
+ messages.append("Desktop file issue: " + l.strip())
- return sane
+QAPATHTEST[ldflags] = "package_qa_hash_style"
+def package_qa_hash_style(path, name, d, elf, messages):
+ """
+ Check if the binary has the right hash style...
+ """
+
+ if not elf:
+ return
+
+ if os.path.islink(path):
+ return
+
+ gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
+ if not gnu_hash:
+ gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
+ if not gnu_hash:
+ return
+
+ objdump = d.getVar('OBJDUMP', True)
+ env_path = d.getVar('PATH', True)
+
+ sane = False
+ has_syms = False
-def package_qa_check_buildpaths(path, name, d):
+ # If this binary has symbols, we expect it to have GNU_HASH too.
+ for line in os.popen("LC_ALL=C PATH=%s %s -p '%s' 2> /dev/null" % (env_path, objdump, path), "r"):
+ if "SYMTAB" in line:
+ has_syms = True
+ if "GNU_HASH" in line:
+ sane = True
+ if "[mips32]" in line or "[mips64]" in line:
+ sane = True
+
+ if has_syms and not sane:
+ messages.append("No GNU_HASH in the elf binary: '%s'" % path)
+
+
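
The check above only inspects `objdump -p` output for SYMTAB and GNU_HASH markers. A standalone sketch over hand-written sample lines (typical of objdump's dynamic-section output, not captured from a real binary):

    # Sample lines are hand-written for illustration.
    sample_output = [
        "  SYMTAB               0x00000000000002a8",
        "  GNU_HASH             0x0000000000000310",
    ]

    has_syms = any("SYMTAB" in line for line in sample_output)
    sane = any("GNU_HASH" in line or "[mips32]" in line or "[mips64]" in line
               for line in sample_output)

    if has_syms and not sane:
        print("No GNU_HASH in the ELF binary")
    else:
        print("hash style OK")
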
+QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
+def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
Check for build paths inside target files and error if not found in the whitelist
"""
- import bb, os
- sane = True
-
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
- return True
+ return
# Ignore symlinks
if os.path.islink(path):
- return True
+ return
- tmpdir = bb.data.getVar('TMPDIR', d, True)
+ tmpdir = d.getVar('TMPDIR', True)
file_content = open(path).read()
if tmpdir in file_content:
- error_msg = "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d)
- sane = package_qa_handle_error(9, error_msg, name, path, d)
+ messages.append("File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
+
+def package_qa_check_license(workdir, d):
+ """
+ Check for changes in the license files
+ """
+ import tempfile
+ sane = True
+
+ lic_files = d.getVar('LIC_FILES_CHKSUM', True)
+ lic = d.getVar('LICENSE', True)
+ pn = d.getVar('PN', True)
+
+ if lic == "CLOSED":
+ return True
+
+ if not lic_files:
+ # just throw a warning now. Once licensing data is entered for enough of the recipes,
+ # this will be converted into an error and False will be returned.
+ bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
+ return False
+
+ srcdir = d.getVar('S', True)
+
+ for url in lic_files.split():
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ srclicfile = os.path.join(srcdir, path)
+ if not os.path.isfile(srclicfile):
+ raise bb.build.FuncFailed(pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
+
+ if 'md5' not in parm:
+ bb.error(pn + ": md5 checksum is not specified for ", url)
+ return False
+ beginline, endline = 0, 0
+ if 'beginline' in parm:
+ beginline = int(parm['beginline'])
+ if 'endline' in parm:
+ endline = int(parm['endline'])
+
+ if (not beginline) and (not endline):
+ md5chksum = bb.utils.md5_file(srclicfile)
+ else:
+ fi = open(srclicfile, 'r')
+ fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
+ tmplicfile = fo.name
+ lineno = 0
+ linesout = 0
+ for line in fi:
+ lineno += 1
+ if (lineno >= beginline):
+ if ((lineno <= endline) or not endline):
+ fo.write(line)
+ linesout += 1
+ else:
+ break
+ fo.flush()
+ fo.close()
+ fi.close()
+ md5chksum = bb.utils.md5_file(tmplicfile)
+ os.unlink(tmplicfile)
+
+ if parm['md5'] == md5chksum:
+ bb.note(pn + ": md5 checksum matched for ", url)
+ else:
+ bb.error(pn + ": md5 checksum does not match for ", url)
+ bb.error(pn + ": The new md5 checksum is ", md5chksum)
+ bb.error(pn + ": Check if the license information has changed.")
+ sane = False
+
return sane
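
The beginline/endline handling above is easy to get wrong; as a minimal sketch (the file name and line range are made up), the same selection can be done without the temporary file:

    import hashlib

    def md5_of_line_range(filename, beginline=0, endline=0):
        # No range given: hash the whole file, as the class does.
        if not beginline and not endline:
            return hashlib.md5(open(filename, "rb").read()).hexdigest()
        selected = []
        for lineno, line in enumerate(open(filename, "rb"), start=1):
            if lineno >= beginline:
                if not endline or lineno <= endline:
                    selected.append(line)
                else:
                    break
        return hashlib.md5(b"".join(selected)).hexdigest()

    # e.g. LIC_FILES_CHKSUM = "file://COPYING;beginline=3;endline=21;md5=..."
    # print(md5_of_line_range("COPYING", 3, 21))
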
def package_qa_check_staged(path,d):
@@ -364,10 +502,9 @@ def package_qa_check_staged(path,d):
to find the one responsible for the errors easily even
if we look at every .pc and .la file
"""
- import os, bb
sane = True
- tmpdir = bb.data.getVar('TMPDIR', d, True)
+ tmpdir = d.getVar('TMPDIR', True)
workdir = os.path.join(tmpdir, "work")
installed = "installed=yes"
@@ -382,126 +519,214 @@ def package_qa_check_staged(path,d):
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
- if file[-2:] == "la":
+ if file.endswith(".la"):
file_content = open(path).read()
- # Don't check installed status for native/cross packages
- if not bb.data.inherits_class("native", d) and not bb.data.inherits_class("cross", d):
- if installed in file_content:
- error_msg = "%s failed sanity test (installed) in path %s" % (file,root)
- sane = package_qa_handle_error(5, error_msg, "staging", path, d)
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
- sane = package_qa_handle_error(8, error_msg, "staging", path, d)
- elif file[-2:] == "pc":
+ sane = package_qa_handle_error("la", error_msg, d)
+ elif file.endswith(".pc"):
file_content = open(path).read()
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
- sane = package_qa_handle_error(6, error_msg, "staging", path, d)
+ sane = package_qa_handle_error("pkgconfig", error_msg, d)
return sane
# Walk over all files in a directory and call func
-def package_qa_walk(path, funcs, package,d):
- import os
- sane = True
+def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
+ import oe.qa
+
+ warnings = []
+ errors = []
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
- for func in funcs:
- if not func(path, package,d):
- sane = False
+ elf = oe.qa.ELFFile(path)
+ try:
+ elf.open()
+ except:
+ elf = None
+ for func in warnfuncs:
+ func(path, package, d, elf, warnings)
+ for func in errorfuncs:
+ func(path, package, d, elf, errors)
- return sane
+ for w in warnings:
+ bb.warn("QA Issue: %s" % w)
+ package_qa_write_error(w, d)
+ for e in errors:
+ bb.error("QA Issue: %s" % e)
+ package_qa_write_error(e, d)
+
+ return len(errors) == 0
+
+def package_qa_check_rdepends(pkg, pkgdest, skip, d):
+ # Don't do this check for kernel/module recipes, there aren't too many debug/development
+ # packages and you can get false positives e.g. on kernel-module-lirc-dev
+ if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
+ return True
-def package_qa_check_rdepends(pkg, workdir, d):
- import bb
sane = True
if not "-dbg" in pkg and not "task-" in pkg and not "-image" in pkg:
# Copied from package_ipk.bbclass
# boiler plate to update the data
localdata = bb.data.createCopy(d)
- root = "%s/install/%s" % (workdir, pkg)
+ root = "%s/%s" % (pkgdest, pkg)
- bb.data.setVar('ROOT', '', localdata)
- bb.data.setVar('ROOT_%s' % pkg, root, localdata)
- pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, True)
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
- bb.data.setVar('PKG', pkgname, localdata)
+ localdata.setVar('PKG', pkgname)
- overrides = bb.data.getVar('OVERRIDES', localdata)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
- overrides = bb.data.expand(overrides, localdata)
- bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+ localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
# Now check the RDEPENDS
- rdepends = bb.utils.explode_deps(bb.data.getVar('RDEPENDS', localdata, True) or "")
+ rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
# Now do the sanity check!!!
for rdepend in rdepends:
- if "-dbg" in rdepend:
+ if "-dbg" in rdepend and "debug-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkgname,rdepend)
- sane = package_qa_handle_error(2, error_msg, pkgname, rdepend, d)
+ sane = package_qa_handle_error("debug-deps", error_msg, d)
+ if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
+ error_msg = "%s rdepends on %s" % (pkgname, rdepend)
+ sane = package_qa_handle_error("dev-deps", error_msg, d)
return sane
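
bb.utils.explode_deps, used above, flattens a dependency string and drops version constraints. A rough standalone equivalent (my own sketch, not bitbake's implementation):

    # Split a dependency string and drop any (...) version constraints.
    def explode_deps(depstr):
        deps = []
        in_paren = False
        for token in depstr.split():
            if token.startswith("("):
                in_paren = not token.endswith(")")
                continue
            if in_paren:
                in_paren = not token.endswith(")")
                continue
            deps.append(token)
        return deps

    print(explode_deps("libfoo (>= 1.2) libbar libbaz-dev"))
    # ['libfoo', 'libbar', 'libbaz-dev']; 'libbaz-dev' would then trip dev-deps
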
# The PACKAGE FUNC to scan each package
python do_package_qa () {
bb.note("DO PACKAGE QA")
- workdir = bb.data.getVar('WORKDIR', d, True)
- packages = bb.data.getVar('PACKAGES',d, True)
+
+ logdir = d.getVar('T', True)
+ pkg = d.getVar('PN', True)
+
+ # Check the compile log for host contamination
+ compilelog = os.path.join(logdir,"log.do_compile")
+
+ if os.path.exists(compilelog):
+ statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
+ if os.system(statement) == 0:
+ bb.warn("%s: The compile log indicates that host include and/or library paths were used.\n \
+ Please check the log '%s' for more information." % (pkg, compilelog))
+
+ # Check the install log for host contamination
+ installlog = os.path.join(logdir,"log.do_install")
+
+ if os.path.exists(installlog):
+ statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
+ if os.system(statement) == 0:
+ bb.warn("%s: The install log indicates that host include and/or library paths were used.\n \
+ Please check the log '%s' for more information." % (pkg, installlog))
+
+ # Scan the packages...
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
# no packages should be scanned
if not packages:
return
- checks = [package_qa_check_rpath, package_qa_check_devdbg,
- package_qa_check_perm, package_qa_check_arch,
- package_qa_check_desktop, package_qa_check_buildpaths]
+ testmatrix = d.getVarFlags("QAPATHTEST")
+
+ g = globals()
walk_sane = True
rdepends_sane = True
for package in packages.split():
- if bb.data.getVar('INSANE_SKIP_' + package, d, True):
- bb.note("Package: %s (skipped)" % package)
- continue
+ skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
+ if skip:
+ bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
+ warnchecks = []
+ for w in (d.getVar("WARN_QA", True) or "").split():
+ if w in skip:
+ continue
+ if w in testmatrix and testmatrix[w] in g:
+ warnchecks.append(g[testmatrix[w]])
+ errorchecks = []
+ for e in (d.getVar("ERROR_QA", True) or "").split():
+ if e in skip:
+ continue
+ if e in testmatrix and testmatrix[e] in g:
+ errorchecks.append(g[testmatrix[e]])
bb.note("Checking Package: %s" % package)
- path = "%s/install/%s" % (workdir, package)
- if not package_qa_walk(path, checks, package, d):
+ path = "%s/%s" % (pkgdest, package)
+ if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
walk_sane = False
- if not package_qa_check_rdepends(package, workdir, d):
+ if not package_qa_check_rdepends(package, pkgdest, skip, d):
rdepends_sane = False
+
if not walk_sane or not rdepends_sane:
bb.fatal("QA run found fatal errors. Please consider fixing them.")
bb.note("DONE with PACKAGE QA")
}
-# The Staging Func, to check all staging
-addtask qa_staging after do_populate_staging before do_build
python do_qa_staging() {
bb.note("QA checking staging")
- if not package_qa_check_staged(bb.data.getVar('STAGING_LIBDIR',d,True), d):
+ if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}/${STAGING_LIBDIR}'), d):
bb.fatal("QA staging was broken by the package built above")
}
-# Check broken config.log files
-addtask qa_configure after do_configure before do_compile
python do_qa_configure() {
- bb.note("Checking sanity of the config.log file")
- import os
- for root, dirs, files in os.walk(bb.data.getVar('WORKDIR', d, True)):
- statement = "grep 'CROSS COMPILE Badness:' %s > /dev/null" % \
+ configs = []
+ workdir = d.getVar('WORKDIR', True)
+ bb.note("Checking autotools environment for common misconfiguration")
+ for root, dirs, files in os.walk(workdir):
+ statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
os.path.join(root,"config.log")
if "config.log" in files:
if os.system(statement) == 0:
- bb.fatal("""This autoconf log indicates errors, it looked at host includes.
+ bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
Rerun configure task after fixing this. The path was '%s'""" % root)
+
+ if "configure.ac" in files:
+ configs.append(os.path.join(root,"configure.ac"))
+ if "configure.in" in files:
+ configs.append(os.path.join(root, "configure.in"))
+
+ cnf = d.getVar('EXTRA_OECONF', True) or ""
+ if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
+ ml = d.getVar("MLPREFIX", True) or ""
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
+ gt = "gettext-native"
+ elif bb.data.inherits_class('cross-canadian', d):
+ gt = "gettext-nativesdk"
+ else:
+ gt = "virtual/" + ml + "gettext"
+ deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
+ if gt not in deps:
+ for config in configs:
+ gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
+ if os.system(gnu) == 0:
+ bb.fatal("""%s required but not in DEPENDS for file %s.
+Missing inherit gettext?""" % (gt, config))
+
+ if not package_qa_check_license(workdir, d):
+ bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
+}
+# The Staging Func, to check all staging
+#addtask qa_staging after do_populate_sysroot before do_build
+do_populate_sysroot[postfuncs] += "do_qa_staging "
+
+# Check broken config.log files, for packages requiring Gettext which don't
+# have it in DEPENDS and for correct LIC_FILES_CHKSUM
+#addtask qa_configure after do_configure before do_compile
+do_configure[postfuncs] += "do_qa_configure "
+
+python () {
+ tests = (d.getVar('WARN_QA', True) or "") + " " + (d.getVar('ERROR_QA', True) or "")
+ if tests.find("desktop") != -1:
+ d.appendVar("PACKAGE_DEPENDS", "desktop-file-utils-native")
}
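
The dispatch in do_package_qa resolves QAPATHTEST flag values to functions via globals(). A minimal sketch of that lookup, with a faked testmatrix standing in for d.getVarFlags("QAPATHTEST"):

    def package_qa_hash_style(path, name, d, elf, messages):
        pass  # stand-in for the real check

    testmatrix = {"ldflags": "package_qa_hash_style"}  # flag name -> function name
    g = globals()

    warn_qa = "ldflags desktop".split()                # hypothetical WARN_QA
    skip = ["desktop"]                                 # from INSANE_SKIP_<pkg>

    warnchecks = [g[testmatrix[w]] for w in warn_qa
                  if w not in skip and w in testmatrix and testmatrix[w] in g]
    print([f.__name__ for f in warnchecks])            # ['package_qa_hash_style']
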
diff --git a/meta/classes/insserv.bbclass b/meta/classes/insserv.bbclass
index d8e88c70f..14290a77e 100644
--- a/meta/classes/insserv.bbclass
+++ b/meta/classes/insserv.bbclass
@@ -1,4 +1,4 @@
-do_rootfs[depends] += "insserv-native:do_populate_staging"
+do_rootfs[depends] += "insserv-native:do_populate_sysroot"
run_insserv () {
insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf
}
diff --git a/meta/classes/kernel-arch.bbclass b/meta/classes/kernel-arch.bbclass
index 2ce0f9727..d37c1fb2e 100644
--- a/meta/classes/kernel-arch.bbclass
+++ b/meta/classes/kernel-arch.bbclass
@@ -5,36 +5,41 @@
#
valid_archs = "alpha cris ia64 \
- x86_64 i386 x86 \
- m68knommu m68k ppc powerpc ppc64 \
+ i386 x86 \
+ m68knommu m68k ppc powerpc powerpc64 ppc64 \
sparc sparc64 \
arm arm26 \
m32r mips \
sh sh64 um h8300 \
parisc s390 v850 \
- avr32 blackfin"
+ avr32 blackfin \
+ microblaze"
def map_kernel_arch(a, d):
- import bb, re
+ import re
- valid_archs = bb.data.getVar('valid_archs', d, 1).split()
+ valid_archs = d.getVar('valid_archs', True).split()
- if re.match('(i.86|athlon)$', a): return 'i386'
- elif re.match('arm26$', a): return 'arm26'
- elif re.match('armeb$', a): return 'arm'
- elif re.match('mipsel$', a): return 'mips'
- elif re.match('sh(3|4)$', a): return 'sh'
- elif re.match('bfin', a): return 'blackfin'
- elif a in valid_archs: return a
+ if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
+ elif re.match('arm26$', a): return 'arm26'
+ elif re.match('armeb$', a): return 'arm'
+ elif re.match('mipsel$', a): return 'mips'
+ elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
+ elif re.match('sh(3|4)$', a): return 'sh'
+ elif re.match('bfin', a): return 'blackfin'
+ elif re.match('microblazeel', a): return 'microblaze'
+ elif a in valid_archs: return a
else:
bb.error("cannot map '%s' to a linux kernel architecture" % a)
-export ARCH = "${@map_kernel_arch(bb.data.getVar('TARGET_ARCH', d, 1), d)}"
+export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', True), d)}"
def map_uboot_arch(a, d):
- if a == "powerpc":
- return "ppc"
+ import re
+
+ if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
+ elif re.match('i.86$', a): return 'x86'
return a
-export UBOOT_ARCH = "${@map_uboot_arch(bb.data.getVar('ARCH', d, 1), d)}"
+export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', True), d)}"
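
A worked example of the regex mapping in map_kernel_arch, trimmed to the interesting cases (standalone Python, not part of the class):

    import re

    def map_kernel_arch(a, valid_archs=("sparc", "mips")):
        # Trimmed copy of the mapping logic above, for illustration.
        if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
        elif re.match('armeb$', a): return 'arm'
        elif re.match('mipsel$', a): return 'mips'
        elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
        elif a in valid_archs: return a
        return None

    for arch in ("i686", "x86_64", "powerpc64", "mipsel", "sparc"):
        print(arch, "->", map_kernel_arch(arch))
    # i686 -> x86, x86_64 -> x86, powerpc64 -> powerpc, mipsel -> mips, sparc -> sparc
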
diff --git a/meta/classes/kernel-yocto.bbclass b/meta/classes/kernel-yocto.bbclass
new file mode 100644
index 000000000..e93163051
--- /dev/null
+++ b/meta/classes/kernel-yocto.bbclass
@@ -0,0 +1,294 @@
+S = "${WORKDIR}/linux"
+
+# remove tasks that modify the source tree in case externalsrc is inherited
+SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_patch"
+
+# returns local (absolute) path names for all valid patches in the
+# src_uri
+def find_patches(d):
+ patches=src_patches(d)
+ patch_list=[]
+ for p in patches:
+ _, _, local, _, _, _ = bb.decodeurl(p)
+ patch_list.append(local)
+
+ return patch_list
+
+# returns all the elements from the src uri that are .scc files
+def find_sccs(d):
+ sources=src_patches(d, True)
+ sources_list=[]
+ for s in sources:
+ base, ext = os.path.splitext(os.path.basename(s))
+ if ext and ext in ('.scc', '.cfg'):
+ sources_list.append(s)
+ elif base == 'defconfig':
+ sources_list.append(s)
+
+ return sources_list
+
+# this is different from find_patches, in that it returns a colon separated
+# list of <patches>:<subdir> instead of just a list of patches
+def find_urls(d):
+ patches=src_patches(d)
+ fetch = bb.fetch2.Fetch([], d)
+ patch_list=[]
+ for p in patches:
+ _, _, local, _, _, _ = bb.decodeurl(p)
+ for url in fetch.urls:
+ urldata = fetch.ud[url]
+ if urldata.localpath == local:
+ patch_list.append(local+':'+urldata.path)
+
+ return patch_list
+
+
+do_patch() {
+ cd ${S}
+
+ # if kernel tools are available in-tree, they are preferred and are
+ # placed on the path before any external tools, unless the external
+ # tools flag (EXTERNAL_KERNEL_TOOLS) is set, in which case we do nothing.
+ if [ -f "${S}/scripts/util/configme" ]; then
+ if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
+ PATH=${S}/scripts/util:${PATH}
+ fi
+ fi
+
+ kbranch=${KBRANCH}
+
+ # if we have a defined/set meta branch we should not be generating
+ # any meta data. The passed branch has what we need.
+ if [ -n "${KMETA}" ]; then
+ createme_flags="--disable-meta-gen"
+ fi
+ createme ${createme_flags} ${ARCH} ${kbranch}
+ if [ $? -ne 0 ]; then
+ echo "ERROR. Could not create ${kbranch}"
+ exit 1
+ fi
+
+ sccs="${@" ".join(find_sccs(d))}"
+ patches="${@" ".join(find_patches(d))}"
+
+ set +e
+ # add any explicitly referenced features onto the end of the feature
+ # list that is passed to the kernel build scripts.
+ if [ -n "${KERNEL_FEATURES}" ]; then
+ for feat in ${KERNEL_FEATURES}; do
+ addon_features="$addon_features --feature $feat"
+ done
+ fi
+
+ # updates or generates the target description
+ updateme --branch ${kbranch} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
+ ${addon_features} ${ARCH} ${KMACHINE} ${sccs} ${patches}
+ if [ $? -ne 0 ]; then
+ echo "ERROR. Could not update ${kbranch}"
+ exit 1
+ fi
+
+ # executes and modifies the source tree as required
+ patchme ${KMACHINE}
+ if [ $? -ne 0 ]; then
+ echo "ERROR. Could not modify ${kbranch}"
+ exit 1
+ fi
+}
+
+do_kernel_checkout() {
+ set +e
+
+ # A linux yocto SRC_URI should use the bareclone option. That
+ # ensures that all the branches are available in the WORKDIR version
+ # of the repository. If it wasn't passed, we should detect it, and put
+ # out a useful error message
+ if [ -d "${WORKDIR}/git/" ] && [ -d "${WORKDIR}/git/.git" ]; then
+ # we build out of {S}, so ensure that ${S} is clean and present
+ rm -rf ${S}
+ mkdir -p ${S}/.git
+
+ echo "WARNING. ${WORKDIR}/git is not a bare clone."
+ echo "Ensure that the SRC_URI includes the 'bareclone=1' option."
+
+ # we can fix up the kernel repository, but at the least the meta
+ # branch must be present. The machine branch may be created later.
+ mv ${WORKDIR}/git/.git ${S}
+ rm -rf ${WORKDIR}/git/
+ cd ${S}
+ if [ -n "${KMETA}" ]; then
+ git branch -a | grep -q ${KMETA}
+ if [ $? -ne 0 ]; then
+ echo "ERROR. The branch '${KMETA}' is required and was not"
+ echo "found. Ensure that the SRC_URI points to a valid linux-yocto"
+ echo "kernel repository"
+ exit 1
+ fi
+ fi
+ fi
+ if [ -d "${WORKDIR}/git/" ] && [ ! -d "${WORKDIR}/git/.git" ]; then
+ # we build out of {S}, so ensure that ${S} is clean and present
+ rm -rf ${S}
+ mkdir -p ${S}/.git
+
+ mv ${WORKDIR}/git/* ${S}/.git
+ rm -rf ${WORKDIR}/git/
+ cd ${S}
+ git config core.bare false
+ fi
+ # end debare
+
+ # convert any remote branches to local tracking ones
+ for i in `git branch -a | grep remotes | grep -v HEAD`; do
+ b=`echo $i | cut -d' ' -f2 | sed 's%remotes/origin/%%'`;
+ git show-ref --quiet --verify -- "refs/heads/$b"
+ if [ $? -ne 0 ]; then
+ git branch $b $i > /dev/null
+ fi
+ done
+
+ # Create a working tree copy of the kernel by checking out a branch
+ git show-ref --quiet --verify -- "refs/heads/${KBRANCH}"
+ if [ $? -eq 0 ]; then
+ # checkout and clobber any unimportant files
+ git checkout -f ${KBRANCH}
+ else
+ echo "Not checking out ${KBRANCH}, it will be created later"
+ git checkout -f master
+ fi
+}
+do_kernel_checkout[dirs] = "${S}"
+
+addtask kernel_checkout before do_patch after do_unpack
+
+do_kernel_configme[dirs] = "${CCACHE_DIR} ${S} ${B}"
+do_kernel_configme() {
+ echo "[INFO] doing kernel configme"
+
+ if [ -n "${KCONFIG_MODE}" ]; then
+ configmeflags=${KCONFIG_MODE}
+ else
+ # If a defconfig was passed, use =n as the baseline, which is achieved
+ # via --allnoconfig
+ if [ -f ${WORKDIR}/defconfig ]; then
+ configmeflags="--allnoconfig"
+ fi
+ fi
+
+ cd ${S}
+ PATH=${PATH}:${S}/scripts/util
+ configme ${configmeflags} --reconfig --output ${B} ${LINUX_KERNEL_TYPE} ${KMACHINE}
+ if [ $? -ne 0 ]; then
+ echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
+ exit 1
+ fi
+
+ echo "# Global settings from linux recipe" >> ${B}/.config
+ echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
+}
+
+python do_kernel_configcheck() {
+ import bb, re, string, sys, commands
+
+ bb.plain("NOTE: validating kernel configuration")
+
+ pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
+ cmd = d.expand("cd ${B}/..; kconf_check -config- ${B} ${S} ${B} ${KBRANCH}")
+ ret, result = commands.getstatusoutput("%s%s" % (pathprefix, cmd))
+
+ bb.plain( "%s" % result )
+}
+
+
+# Ensure that the branches (BSP and meta) are on the locations specified by
+# their SRCREV values. If they are NOT on the right commits, the branches
+# are reset to the correct commit.
+do_validate_branches() {
+ cd ${S}
+
+ set +e
+ # if SRCREV is AUTOREV it shows up as AUTOINC, so there's nothing to
+ # check and we can exit early
+ if [ "${SRCREV_machine}" = "AUTOINC" ]; then
+ return
+ fi
+
+ # if the branches do not exist, then there's nothing to check either
+ git show-ref --quiet --verify -- "refs/heads/${KBRANCH}"
+ if [ $? -eq 1 ]; then
+ return
+ fi
+
+ branch_head=`git show-ref -s --heads ${KBRANCH}`
+ if [ -z "${SRCREV_machine}" ]; then
+ target_branch_head="${SRCREV}"
+ else
+ target_branch_head="${SRCREV_machine}"
+ fi
+
+ if [ "${target_branch_head}" = "AUTOINC" ]; then
+ return
+ fi
+
+ # We have SRCREVs and we have branches so validation can continue!
+ current=`git branch |grep \*|sed 's/^\* //'`
+ if [ -n "$target_branch_head" ] && [ "$branch_head" != "$target_branch_head" ] &&
+ [ "$target_branch_head" != "AUTOINC" ]; then
+ ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
+ if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
+ echo "ERROR ${target_branch_head} is not a valid commit ID."
+ echo "The kernel source tree may be out of sync"
+ exit 1
+ else
+ echo "Forcing branch $current to ${target_branch_head}"
+ git branch -m $current $current-orig
+ git checkout -b $current ${target_branch_head}
+ fi
+ fi
+
+ meta_head=`git show-ref -s --heads ${KMETA}`
+ target_meta_head="${SRCREV_meta}"
+ git show-ref --quiet --verify -- "refs/heads/${KMETA}"
+ if [ $? -eq 1 ]; then
+ return
+ fi
+
+ if [ "${target_meta_head}" = "AUTOINC" ]; then
+ return
+ fi
+
+ if [ "$meta_head" != "$target_meta_head" ]; then
+ ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
+ if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
+ echo "ERROR ${target_meta_head} is not a valid commit ID"
+ echo "The kernel source tree may be out of sync"
+ exit 1
+ else
+ echo "Forcing branch meta to ${target_meta_head}"
+ git branch -m ${KMETA} ${KMETA}-orig
+ git checkout -b ${KMETA} ${target_meta_head}
+ if [ $? -ne 0 ]; then
+ echo "ERROR: could not checkout meta branch from known hash ${target_meta_head}"
+ exit 1
+ fi
+ fi
+ fi
+
+ # restore the branch for builds
+ git checkout -f ${KBRANCH}
+}
+
+# Many scripts want to look in arch/$arch/boot for the bootable
+# image. This poses a problem for vmlinux based booting. This
+# task arranges to have vmlinux appear in the normalized directory
+# location.
+do_kernel_link_vmlinux() {
+ if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
+ mkdir ${B}/arch/${ARCH}/boot
+ fi
+ cd ${B}/arch/${ARCH}/boot
+ ln -sf ../../../vmlinux
+}
+
+OE_TERMINAL_EXPORTS += "GUILT_BASE"
+GUILT_BASE = "meta"
diff --git a/meta/classes/kernel.bbclass b/meta/classes/kernel.bbclass
index b2266bee5..90af59712 100644
--- a/meta/classes/kernel.bbclass
+++ b/meta/classes/kernel.bbclass
@@ -1,33 +1,39 @@
inherit linux-kernel-base module_strip
PROVIDES += "virtual/kernel"
-DEPENDS += "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}depmod-${@get_kernelmajorversion('${PV}')} virtual/${TARGET_PREFIX}gcc${KERNEL_CCSUFFIX} update-modules"
+DEPENDS += "virtual/${TARGET_PREFIX}gcc kmod-native virtual/${TARGET_PREFIX}gcc${KERNEL_CCSUFFIX} update-modules"
# we include gcc above, we don't need virtual/libc
INHIBIT_DEFAULT_DEPS = "1"
KERNEL_IMAGETYPE ?= "zImage"
+INITRAMFS_IMAGE ?= ""
+INITRAMFS_TASK ?= ""
python __anonymous () {
-
- import bb
-
- kerneltype = bb.data.getVar('KERNEL_IMAGETYPE', d, 1) or ''
+ kerneltype = d.getVar('KERNEL_IMAGETYPE', True) or ''
if kerneltype == 'uImage':
- depends = bb.data.getVar("DEPENDS", d, 1)
- depends = "%s u-boot-mkimage-native" % depends
- bb.data.setVar("DEPENDS", depends, d)
+ depends = d.getVar("DEPENDS", True)
+ depends = "%s u-boot-mkimage-native" % depends
+ d.setVar("DEPENDS", depends)
+
+ image = d.getVar('INITRAMFS_IMAGE', True)
+ if image:
+ d.setVar('INITRAMFS_TASK', '${INITRAMFS_IMAGE}:do_rootfs')
}
-inherit kernel-arch
+inherit kernel-arch deploy
PACKAGES_DYNAMIC += "kernel-module-*"
PACKAGES_DYNAMIC += "kernel-image-*"
+PACKAGES_DYNAMIC += "kernel-firmware-*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
-KERNEL_PRIORITY = "${@bb.data.getVar('PV',d,1).split('-')[0].split('.')[-1]}"
+KERNEL_PRIORITY ?= "${@int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[0]) * 10000 + \
+ int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[1]) * 100 + \
+ int(d.getVar('PV',1).split('-')[0].split('+')[0].split('.')[-1])}"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
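
The new KERNEL_PRIORITY default weights the version components after stripping any "-" or "+" suffix from PV. A worked example with a made-up PV:

    pv = "3.0.24+git1+abcdef"       # hypothetical PV, for illustration only
    parts = pv.split('-')[0].split('+')[0].split('.')
    priority = int(parts[0]) * 10000 + int(parts[1]) * 100 + int(parts[-1])
    print(priority)                 # 3*10000 + 0*100 + 24 = 30024
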
@@ -42,20 +48,19 @@ HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
TARGET_LD_KERNEL_ARCH ?= ""
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
-KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
-KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
+KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
+KERNEL_LD = "${HOST_PREFIX}ld${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
# Where built kernel lies in the kernel tree
-KERNEL_OUTPUT = "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
+KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
KERNEL_IMAGEDEST = "boot"
#
# configuration
#
-export CMDLINE_CONSOLE = "console=${@bb.data.getVar("KERNEL_CONSOLE",d,1) or "ttyS0"}"
+export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
-KERNEL_VERSION = "${@get_kernelversion('${S}')}"
-KERNEL_MAJOR_VERSION = "${@get_kernelmajorversion('${KERNEL_VERSION}')}"
+KERNEL_VERSION = "${@get_kernelversion('${B}')}"
KERNEL_LOCALVERSION ?= ""
@@ -66,175 +71,212 @@ PACKAGE_ARCH = "${MACHINE_ARCH}"
UBOOT_ENTRYPOINT ?= "20008000"
UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
+# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
+# We don't want to override kernel Makefile variables from the environment
+EXTRA_OEMAKE = ""
+
+KERNEL_ALT_IMAGETYPE ??= ""
+
+KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', True))}"
+
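
What the KERNEL_IMAGETYPE_FOR_MAKE lambda evaluates to: strip a trailing ".gz" so make is asked for the uncompressed target, which kernel_do_compile then compresses itself. For example:

    strip_gz = lambda s: s[:-3] if s[-3:] == ".gz" else s
    print(strip_gz("vmlinux.gz"))   # vmlinux
    print(strip_gz("bzImage"))      # bzImage (unchanged)
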
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
oe_runmake include/linux/version.h CC="${KERNEL_CC}" LD="${KERNEL_LD}"
- if [ "${KERNEL_MAJOR_VERSION}" != "2.6" ]; then
- oe_runmake dep CC="${KERNEL_CC}" LD="${KERNEL_LD}"
- fi
- oe_runmake ${KERNEL_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}"
- if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- oe_runmake modules CC="${KERNEL_CC}" LD="${KERNEL_LD}"
- else
- oenote "no modules to compile"
+ oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+ if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
+ gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
fi
}
-kernel_do_stage() {
- ASMDIR=`readlink include/asm`
-
- mkdir -p ${STAGING_KERNEL_DIR}/include/$ASMDIR
- cp -fR include/$ASMDIR/* ${STAGING_KERNEL_DIR}/include/$ASMDIR/
- rm -f $ASMDIR ${STAGING_KERNEL_DIR}/include/asm
- ln -sf $ASMDIR ${STAGING_KERNEL_DIR}/include/asm
-
- mkdir -p ${STAGING_KERNEL_DIR}/include/asm-generic
- cp -fR include/asm-generic/* ${STAGING_KERNEL_DIR}/include/asm-generic/
-
- mkdir -p ${STAGING_KERNEL_DIR}/include/linux
- cp -fR include/linux/* ${STAGING_KERNEL_DIR}/include/linux/
-
- mkdir -p ${STAGING_KERNEL_DIR}/include/net
- cp -fR include/net/* ${STAGING_KERNEL_DIR}/include/net/
-
- mkdir -p ${STAGING_KERNEL_DIR}/include/pcmcia
- cp -fR include/pcmcia/* ${STAGING_KERNEL_DIR}/include/pcmcia/
-
- for entry in drivers/crypto include/media include/acpi include/sound include/video; do
- if [ -d $entry ]; then
- mkdir -p ${STAGING_KERNEL_DIR}/$entry
- cp -fR $entry/* ${STAGING_KERNEL_DIR}/$entry/
- fi
- done
-
- if [ -d drivers/sound ]; then
- # 2.4 alsa needs some headers from this directory
- mkdir -p ${STAGING_KERNEL_DIR}/include/drivers/sound
- cp -fR drivers/sound/*.h ${STAGING_KERNEL_DIR}/include/drivers/sound/
- fi
-
- install -m 0644 .config ${STAGING_KERNEL_DIR}/config-${KERNEL_VERSION}
- ln -sf config-${KERNEL_VERSION} ${STAGING_KERNEL_DIR}/.config
- ln -sf config-${KERNEL_VERSION} ${STAGING_KERNEL_DIR}/kernel-config
- echo "${KERNEL_VERSION}" >${STAGING_KERNEL_DIR}/kernel-abiversion
- echo "${S}" >${STAGING_KERNEL_DIR}/kernel-source
- echo "${KERNEL_CCSUFFIX}" >${STAGING_KERNEL_DIR}/kernel-ccsuffix
- echo "${KERNEL_LDSUFFIX}" >${STAGING_KERNEL_DIR}/kernel-ldsuffix
- [ -e Rules.make ] && install -m 0644 Rules.make ${STAGING_KERNEL_DIR}/
- [ -e Makefile ] && install -m 0644 Makefile ${STAGING_KERNEL_DIR}/
-
- # Check if arch/${ARCH}/Makefile exists and install it
- if [ -e arch/${ARCH}/Makefile ]; then
- install -d ${STAGING_KERNEL_DIR}/arch/${ARCH}
- install -m 0644 arch/${ARCH}/Makefile* ${STAGING_KERNEL_DIR}/arch/${ARCH}
- # Otherwise check arch/x86/Makefile for i386 and x86_64 on kernels >= 2.6.24
- elif [ -e arch/x86/Makefile ]; then
- install -d ${STAGING_KERNEL_DIR}/arch/x86
- install -m 0644 arch/x86/Makefile* ${STAGING_KERNEL_DIR}/arch/x86
+do_compile_kernelmodules() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
+ if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+ oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}"
+ else
+ bbnote "no modules to compile"
fi
- cp -fR include/config* ${STAGING_KERNEL_DIR}/include/
- # Install kernel images and system.map to staging
- [ -e vmlinux ] && install -m 0644 vmlinux ${STAGING_KERNEL_DIR}/
- install -m 0644 ${KERNEL_OUTPUT} ${STAGING_KERNEL_DIR}/${KERNEL_IMAGETYPE}
- install -m 0644 System.map ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION}
- [ -e Module.symvers ] && install -m 0644 Module.symvers ${STAGING_KERNEL_DIR}/
-
- cp -fR scripts ${STAGING_KERNEL_DIR}/
}
+addtask compile_kernelmodules after do_compile before do_install
kernel_do_install() {
+ #
+ # First install the modules
+ #
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
+ rm -f "${D}/lib/modules/${KERNEL_VERSION}/modules.order"
+ rm -f "${D}/lib/modules/${KERNEL_VERSION}/modules.builtin"
+ rm "${D}/lib/modules/${KERNEL_VERSION}/build"
+ rm "${D}/lib/modules/${KERNEL_VERSION}/source"
else
- oenote "no modules to install"
+ bbnote "no modules to install"
fi
-
+
+ #
+ # Install various kernel output (zImage, map file, config, module support files)
+ #
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
- install -d ${D}/etc/modutils
- if [ "${KERNEL_MAJOR_VERSION}" = "2.6" ]; then
- install -d ${D}/etc/modprobe.d
+ [ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
+ install -d ${D}/etc/modules-load.d
+ install -d ${D}/etc/modprobe.d
+
+ #
+ # Support for external module building - create a minimal copy of the
+ # kernel source tree.
+ #
+ kerneldir=${D}/kernel
+ install -d $kerneldir
+
+ #
+ # Store the kernel version in sysroots for module-base.bbclass
+ #
+
+ echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
+
+ #
+ # Store kernel image name to allow use during image generation
+ #
+
+ echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
+
+ #
+ # Copy the entire source tree. In case an external build directory is
+ # used, copy the build directory over first, then copy over the source
+ # dir. This ensures the original Makefiles are used and not the
+ # redirecting Makefiles in the build directory.
+ #
+ # work and sysroots can be on different partitions, so we can't rely on
+ # hardlinking, unfortunately.
+ #
+ cp -fR * $kerneldir
+ cp .config $kerneldir
+ if [ "${S}" != "${B}" ]; then
+ cp -fR ${S}/* $kerneldir
fi
-
- # Check if scripts/genksyms exists and if so, build it
- if [ -e scripts/genksyms/ ]; then
- oe_runmake SUBDIRS="scripts/genksyms"
- fi
+ install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
+ install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
+
+ #
+ # Clean and remove files not needed for building modules.
+ # Some distributions go through a lot more trouble to strip out
+ # unnecessary headers; for now, we just prune the obvious bits.
+ #
+ # We don't want to leave host-arch binaries in /sysroots, so
+ # we clean the scripts dir while leaving the generated config
+ # and include files.
+ #
+ oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean
+ make -C $kerneldir _mrproper_scripts
+ find $kerneldir -path $kerneldir/scripts -prune -o -name "*.[csS]" -exec rm '{}' \;
+ find $kerneldir/Documentation -name "*.txt" -exec rm '{}' \;
- install -d ${STAGING_KERNEL_DIR}
- cp -fR scripts ${STAGING_KERNEL_DIR}/
+ # As of Linux kernel version 3.0.1, the clean target removes
+ # arch/powerpc/lib/crtsavres.o which is present in
+ # KBUILD_LDFLAGS_MODULE, making it required to build external modules.
+ if [ ${ARCH} = "powerpc" ]; then
+ cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
+ fi
+
+ # Remove the following binaries which cause strip errors
+ # during do_package for cross-compiled platforms
+ bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
+ arch/powerpc/boot/mktree"
+ for entry in $bin_files; do
+ rm -f $kerneldir/$entry
+ done
}
-kernel_do_configure() {
- yes '' | oe_runmake oldconfig
+PACKAGE_PREPROCESS_FUNCS += "kernel_package_preprocess"
+
+kernel_package_preprocess () {
+ rm -rf ${PKGD}/kernel
}
-do_menuconfig() {
- export TERMWINDOWTITLE="${PN} Kernel Configuration"
- export SHELLCMDS="make menuconfig"
- ${TERMCMDRUN}
- if [ $? -ne 0 ]; then
- echo "Fatal: '${TERMCMD}' not found. Check TERMCMD variable."
- exit 1
+sysroot_stage_all_append() {
+ sysroot_stage_dir ${D}/kernel ${SYSROOT_DESTDIR}/kernel
+}
+
+kernel_do_configure() {
+ # fixes extra + in /lib/modules/2.6.37+
+ # $ scripts/setlocalversion . => +
+ # $ make kernelversion => 2.6.37
+ # $ make kernelrelease => 2.6.37+
+ touch ${B}/.scmversion ${S}/.scmversion
+
+ # Copy defconfig to .config if .config does not exist. This allows
+ # recipes to manage the .config themselves in do_configure_prepend().
+ if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
+ cp "${WORKDIR}/defconfig" "${B}/.config"
fi
+ yes '' | oe_runmake oldconfig
+
+ if [ ! -z "${INITRAMFS_IMAGE}" ]; then
+ for img in cpio.gz cpio.lzo cpio.lzma cpio.xz; do
+ if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
+ cp "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" initramfs.$img
+ fi
+ done
+ fi
+}
+
+do_configure[depends] += "${INITRAMFS_TASK}"
+
+do_savedefconfig() {
+ oe_runmake savedefconfig
}
-do_menuconfig[nostamp] = "1"
-addtask menuconfig after do_patch
+do_savedefconfig[nostamp] = "1"
+addtask savedefconfig after do_configure
-pkg_postinst_kernel () {
+pkg_postinst_kernel-base () {
cd /${KERNEL_IMAGEDEST}; update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
}
-pkg_postrm_kernel () {
+pkg_postrm_kernel-base () {
cd /${KERNEL_IMAGEDEST}; update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
}
inherit cml1
-EXPORT_FUNCTIONS do_compile do_install do_stage do_configure
+EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
-PACKAGES = "kernel kernel-base kernel-image kernel-dev kernel-vmlinux"
+PACKAGES = "kernel kernel-base kernel-vmlinux kernel-image kernel-dev kernel-misc"
FILES = ""
FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
-FILES_kernel-dev = "/boot/System.map* /boot/config*"
+FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config*"
FILES_kernel-vmlinux = "/boot/vmlinux*"
+# misc is a package to contain files we need in staging
+FILES_kernel-misc = "/kernel/include/config /kernel/scripts /kernel/drivers/crypto /kernel/drivers/media"
RDEPENDS_kernel = "kernel-base"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
RDEPENDS_kernel-base ?= "kernel-image"
-PKG_kernel-image = "kernel-image-${KERNEL_VERSION}"
-PKG_kernel-base = "kernel-${KERNEL_VERSION}"
+PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
+PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
ALLOW_EMPTY_kernel = "1"
ALLOW_EMPTY_kernel-base = "1"
ALLOW_EMPTY_kernel-image = "1"
-# Userspace workarounds for kernel modules issues
-# This is shame, fix the kernel instead!
-DEPENDS_kernel-module-dtl1-cs = "bluez-dtl1-workaround"
-RDEPENDS_kernel-module-dtl1-cs = "bluez-dtl1-workaround"
-
pkg_postinst_kernel-image () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
if [ -n "$D" ]; then
- ${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION} ${KERNEL_VERSION}
+ depmod -a -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION} ${KERNEL_VERSION}
else
depmod -a
fi
}
pkg_postinst_modules () {
-if [ -n "$D" ]; then
- ${HOST_PREFIX}depmod-${KERNEL_MAJOR_VERSION} -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION} ${KERNEL_VERSION}
-else
+if [ -z "$D" ]; then
depmod -a
update-modules || true
fi
@@ -271,13 +313,16 @@ module_conf_rfcomm = "alias bt-proto-3 rfcomm"
python populate_packages_prepend () {
def extract_modinfo(file):
- import os, re
- tmpfile = os.tmpnam()
- cmd = "PATH=\"%s\" %sobjcopy -j .modinfo -O binary %s %s" % (bb.data.getVar("PATH", d, 1), bb.data.getVar("HOST_PREFIX", d, 1) or "", file, tmpfile)
+ import tempfile, re
+ tempfile.tempdir = d.getVar("WORKDIR", True)
+ tf = tempfile.mkstemp()
+ tmpfile = tf[1]
+ cmd = "PATH=\"%s\" %sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("PATH", True), d.getVar("HOST_PREFIX", True) or "", file, tmpfile)
os.system(cmd)
f = open(tmpfile)
l = f.read().split("\000")
f.close()
+ os.close(tf[0])
os.unlink(tmpfile)
exp = re.compile("([^=]+)=(.*)")
vals = {}
@@ -289,23 +334,21 @@ python populate_packages_prepend () {
return vals
def parse_depmod():
- import os, re
+ import re
- dvar = bb.data.getVar('D', d, 1)
+ dvar = d.getVar('PKGD', True)
if not dvar:
- bb.error("D not defined")
+ bb.error("PKGD not defined")
return
- kernelver = bb.data.getVar('KERNEL_VERSION', d, 1)
+ kernelver = d.getVar('KERNEL_VERSION', True)
kernelver_stripped = kernelver
m = re.match('^(.*-hh.*)[\.\+].*$', kernelver)
if m:
kernelver_stripped = m.group(1)
- path = bb.data.getVar("PATH", d, 1)
- host_prefix = bb.data.getVar("HOST_PREFIX", d, 1) or ""
- major_version = bb.data.getVar('KERNEL_MAJOR_VERSION', d, 1)
+ path = d.getVar("PATH", True)
- cmd = "PATH=\"%s\" %sdepmod-%s -n -a -r -b %s -F %s/boot/System.map-%s %s" % (path, host_prefix, major_version, dvar, dvar, kernelver, kernelver_stripped)
+ cmd = "PATH=\"%s\" depmod -n -a -b %s -F %s/boot/System.map-%s %s" % (path, dvar, dvar, kernelver, kernelver_stripped)
f = os.popen(cmd, 'r')
deps = {}
@@ -340,10 +383,13 @@ python populate_packages_prepend () {
return deps
def get_dependencies(file, pattern, format):
- file = file.replace(bb.data.getVar('D', d, 1) or '', '', 1)
+ # file no longer includes PKGD
+ file = file.replace(d.getVar('PKGD', True) or '', '', 1)
+ # instead is prefixed with /lib/modules/${KERNEL_VERSION}
+ file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', True) or '', '', 1)
if module_deps.has_key(file):
- import os.path, re
+ import re
dependencies = []
for i in module_deps[file]:
m = re.match(pattern, os.path.basename(i))
@@ -359,124 +405,151 @@ python populate_packages_prepend () {
import re
vals = extract_modinfo(file)
- dvar = bb.data.getVar('D', d, 1)
+ dvar = d.getVar('PKGD', True)
- # If autoloading is requested, output /etc/modutils/<name> and append
+ # If autoloading is requested, output /etc/modules-load.d/<name>.conf and append
# appropriate modprobe commands to the postinst
- autoload = bb.data.getVar('module_autoload_%s' % basename, d, 1)
+ autoload = d.getVar('module_autoload_%s' % basename, True)
if autoload:
- name = '%s/etc/modutils/%s' % (dvar, basename)
+ name = '%s/etc/modules-load.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
for m in autoload.split():
f.write('%s\n' % m)
f.close()
- postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True)
if not postinst:
bb.fatal("pkg_postinst_%s not defined" % pkg)
- postinst += bb.data.getVar('autoload_postinst_fragment', d, 1) % autoload
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
+ postinst += d.getVar('autoload_postinst_fragment', True) % autoload
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
# Write out any modconf fragment
- modconf = bb.data.getVar('module_conf_%s' % basename, d, 1)
+ modconf = d.getVar('module_conf_%s' % basename, True)
if modconf:
- if bb.data.getVar("KERNEL_MAJOR_VERSION", d, 1) == "2.6":
- name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
- else:
- name = '%s/etc/modutils/%s.conf' % (dvar, basename)
+ name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
f.write("%s\n" % modconf)
f.close()
- files = bb.data.getVar('FILES_%s' % pkg, d, 1)
- files = "%s /etc/modutils/%s /etc/modutils/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename, basename)
- bb.data.setVar('FILES_%s' % pkg, files, d)
+ files = d.getVar('FILES_%s' % pkg, True)
+ files = "%s /etc/modules-load.d/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename)
+ d.setVar('FILES_%s' % pkg, files)
if vals.has_key("description"):
- old_desc = bb.data.getVar('DESCRIPTION_' + pkg, d, 1) or ""
- bb.data.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"], d)
+ old_desc = d.getVar('DESCRIPTION_' + pkg, True) or ""
+ d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
- rdepends_str = bb.data.getVar('RDEPENDS_' + pkg, d, 1)
+ rdepends_str = d.getVar('RDEPENDS_' + pkg, True)
if rdepends_str:
rdepends = rdepends_str.split()
else:
rdepends = []
rdepends.extend(get_dependencies(file, pattern, format))
- bb.data.setVar('RDEPENDS_' + pkg, ' '.join(rdepends), d)
+ d.setVar('RDEPENDS_' + pkg, ' '.join(rdepends))
module_deps = parse_depmod()
module_regex = '^(.*)\.k?o$'
module_pattern = 'kernel-module-%s'
- postinst = bb.data.getVar('pkg_postinst_modules', d, 1)
- postrm = bb.data.getVar('pkg_postrm_modules', d, 1)
- do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='update-modules kernel-%s' % bb.data.getVar("KERNEL_VERSION", d, 1))
+ postinst = d.getVar('pkg_postinst_modules', True)
+ postrm = d.getVar('pkg_postrm_modules', True)
+ do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.bin$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.fw$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.cis$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+ do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='update-modules kernel-%s' % d.getVar("KERNEL_VERSION", True))
+
+ # If modules-load.d and modprobe.d are empty at this point, remove them to
+ # avoid warnings; os.rmdir is only applied to directories that have no
+ # entries.
+ dvar = d.getVar('PKGD', True)
+ for dir in ["%s/etc/modprobe.d" % (dvar), "%s/etc/modules-load.d" % (dvar), "%s/etc" % (dvar)]:
+ if len(os.listdir(dir)) == 0:
+ os.rmdir(dir)
- import re, os
+ import re
metapkg = "kernel-modules"
- bb.data.setVar('ALLOW_EMPTY_' + metapkg, "1", d)
- bb.data.setVar('FILES_' + metapkg, "", d)
- blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux' ]
+ d.setVar('ALLOW_EMPTY_' + metapkg, "1")
+ d.setVar('FILES_' + metapkg, "")
+ blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux', 'perf', 'perf-dbg', 'kernel-misc' ]
for l in module_deps.values():
for i in l:
pkg = module_pattern % legitimize_package_name(re.match(module_regex, os.path.basename(i)).group(1))
blacklist.append(pkg)
metapkg_rdepends = []
- packages = bb.data.getVar('PACKAGES', d, 1).split()
+ packages = d.getVar('PACKAGES', True).split()
for pkg in packages[1:]:
if not pkg in blacklist and not pkg in metapkg_rdepends:
metapkg_rdepends.append(pkg)
- bb.data.setVar('RDEPENDS_' + metapkg, ' '.join(metapkg_rdepends), d)
- bb.data.setVar('DESCRIPTION_' + metapkg, 'Kernel modules meta package', d)
+ d.setVar('RDEPENDS_' + metapkg, ' '.join(metapkg_rdepends))
+ d.setVar('DESCRIPTION_' + metapkg, 'Kernel modules meta package')
packages.append(metapkg)
- bb.data.setVar('PACKAGES', ' '.join(packages), d)
+ d.setVar('PACKAGES', ' '.join(packages))
}
# Support checking the kernel size since some kernels need to reside in partitions
# with a fixed length or there is a limit in transferring the kernel to memory
do_sizecheck() {
if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
- size=`ls -l arch/${ARCH}/boot/${KERNEL_IMAGETYPE} | awk '{ print $5}'`
- if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
- rm arch/${ARCH}/boot/${KERNEL_IMAGETYPE}
- die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
- fi
- fi
+ size=`ls -l ${KERNEL_OUTPUT} | awk '{ print $5}'`
+ if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
+ rm ${KERNEL_OUTPUT}
+ die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
+ fi
+ fi
}
addtask sizecheck before do_install after do_compile
KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
+# Don't include the DATETIME variable in the sstate package signatures
+KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
-do_deploy() {
- install -d ${DEPLOY_DIR_IMAGE}
- install -m 0644 arch/${ARCH}/boot/${KERNEL_IMAGETYPE} ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGE_BASE_NAME}.bin
- package_stagefile_shell ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGE_BASE_NAME}.bin
- if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
- tar -cvzf ${DEPLOY_DIR_IMAGE}/modules-${KERNEL_VERSION}-${PR}-${MACHINE}.tgz -C ${D} lib
- fi
-
+do_uboot_mkimage() {
if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
- if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
- ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
- uboot-mkimage -A ${ARCH} -O linux -T kernel -C none -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${DEPLOY_DIR_IMAGE}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
- rm -f linux.bin
- else
- ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
- rm -f linux.bin.gz
- gzip -9 linux.bin
- uboot-mkimage -A ${ARCH} -O linux -T kernel -C gzip -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz ${DEPLOY_DIR_IMAGE}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
- rm -f linux.bin.gz
+ if test ! -e arch/${ARCH}/boot/uImage ; then
+ ENTRYPOINT=${UBOOT_ENTRYPOINT}
+ if test -n "${UBOOT_ENTRYSYMBOL}"; then
+ ENTRYPOINT=`${HOST_PREFIX}nm ${S}/vmlinux | \
+ awk '$3=="${UBOOT_ENTRYSYMBOL}" {print $1}'`
+ fi
+ if test -e arch/${ARCH}/boot/compressed/vmlinux ; then
+ ${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C none -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin arch/${ARCH}/boot/uImage
+ rm -f linux.bin
+ else
+ ${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
+ rm -f linux.bin.gz
+ gzip -9 linux.bin
+ uboot-mkimage -A ${UBOOT_ARCH} -O linux -T kernel -C gzip -a ${UBOOT_LOADADDRESS} -e $ENTRYPOINT -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz arch/${ARCH}/boot/uImage
+ rm -f linux.bin.gz
+ fi
fi
- package_stagefile_shell ${DEPLOY_DIR_IMAGE}/uImage-${PV}-${PR}-${MACHINE}-${DATETIME}.bin
+ fi
+}
+
+addtask uboot_mkimage before do_install after do_compile
+
+kernel_do_deploy() {
+ install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
+ if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
+ tar -cvzf ${DEPLOYDIR}/modules-${KERNEL_VERSION}-${PR}-${MACHINE}.tgz -C ${D} lib
fi
- cd ${DEPLOY_DIR_IMAGE}
+ cd ${DEPLOYDIR}
rm -f ${KERNEL_IMAGE_SYMLINK_NAME}.bin
ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${KERNEL_IMAGE_SYMLINK_NAME}.bin
- package_stagefile_shell ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGE_SYMLINK_NAME}.bin
+ ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${KERNEL_IMAGETYPE}
+
+ cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
}
+do_deploy[dirs] = "${DEPLOYDIR} ${B}"
+
+addtask deploy before do_build after do_install
-do_deploy[dirs] = "${S}"
+EXPORT_FUNCTIONS do_deploy
-addtask deploy before do_package after do_install
+# perf must be enabled in individual kernel recipes
+PACKAGES =+ "perf-dbg perf"
+FILES_perf = "${bindir}/* \
+ ${libexecdir}"
+FILES_perf-dbg = "${FILES_${PN}-dbg}"
diff --git a/meta/classes/lib_package.bbclass b/meta/classes/lib_package.bbclass
index 3fd43c3df..5ce872757 100644
--- a/meta/classes/lib_package.bbclass
+++ b/meta/classes/lib_package.bbclass
@@ -2,8 +2,9 @@ PACKAGES += "${PN}-bin"
FILES_${PN} = "${libexecdir} ${libdir}/lib*${SOLIBS} \
${sysconfdir} ${sharedstatedir} ${localstatedir} \
- /lib/*${SOLIBS} ${datadir}/${PN} ${libdir}/${PN}"
+ ${base_libdir}/*${SOLIBS} \
+ ${datadir}/${PN} ${libdir}/${PN}"
FILES_${PN}-dev = "${includedir} ${libdir}/lib*${SOLIBSDEV} ${libdir}/*.la \
- ${libdir}/*.a ${libdir}/pkgconfig /lib/*.a /lib/*.o \
+ ${libdir}/*.o ${libdir}/pkgconfig /lib/*.o \
${datadir}/aclocal ${bindir}/*-config"
FILES_${PN}-bin = "${bindir}/* ${sbindir}/* /bin/* /sbin/*"
diff --git a/meta/classes/libc-common.bbclass b/meta/classes/libc-common.bbclass
new file mode 100644
index 000000000..9b91f0a1a
--- /dev/null
+++ b/meta/classes/libc-common.bbclass
@@ -0,0 +1,35 @@
+do_install() {
+ oe_runmake install_root=${D} install
+ for r in ${rpcsvc}; do
+ h=`echo $r|sed -e's,\.x$,.h,'`
+ install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
+ done
+ install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
+ install -d ${D}${libdir}/locale
+ make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
+ # get rid of some broken files...
+ for i in ${GLIBC_BROKEN_LOCALES}; do
+ grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
+ mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
+ done
+ rm -f ${D}${sysconfdir}/rpc
+ rm -rf ${D}${datadir}/zoneinfo
+ rm -rf ${D}${libexecdir}/getconf
+}
+
+def get_libc_fpu_setting(bb, d):
+ if d.getVar('TARGET_FPU', True) in [ 'soft' ]:
+ return "--without-fp"
+ return ""
+
+python populate_packages_prepend () {
+ if d.getVar('DEBIAN_NAMES', True):
+ bpn = d.getVar('BPN', True)
+ d.setVar('PKG_'+bpn, 'libc6')
+ d.setVar('PKG_'+bpn+'-dev', 'libc6-dev')
+ d.setVar('PKG_'+bpn+'-dbg', 'libc6-dbg')
+ # For backward compatibility with old -dbg package
+ d.setVar('RPROVIDES_' + bpn + '-dbg', 'libc-dbg')
+ d.setVar('RCONFLICTS_' + bpn + '-dbg', 'libc-dbg')
+ d.setVar('RREPLACES_' + bpn + '-dbg', 'libc-dbg')
+}
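
For illustration, with DEBIAN_NAMES set and a hypothetical BPN of "eglibc",
the renaming above produces:

    eglibc      -> libc6
    eglibc-dev  -> libc6-dev
    eglibc-dbg  -> libc6-dbg   (which still RPROVIDES/RREPLACES/RCONFLICTS libc-dbg)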
diff --git a/meta/classes/libc-package.bbclass b/meta/classes/libc-package.bbclass
new file mode 100644
index 000000000..10f5f8748
--- /dev/null
+++ b/meta/classes/libc-package.bbclass
@@ -0,0 +1,384 @@
+#
+# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
+# may need packaging and it's pointless to duplicate this code.
+#
+# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
+# "compile" - Use QEMU to generate the binary locale files
+# "precompiled" - The binary locale files are pregenerated and already present
+# "ondevice" - The device will build the locale files upon first boot through the postinst
+
+GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
+
+python __anonymous () {
+ enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
+
+ pn = d.getVar("PN", True)
+ if pn.endswith("-initial"):
+ enabled = False
+
+ if enabled and int(enabled):
+ import re
+
+ target_arch = d.getVar("TARGET_ARCH", True)
+ binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
+
+ for regexp in binary_arches.split(" "):
+ r = re.compile(regexp)
+
+ if r.match(target_arch):
+ depends = d.getVar("DEPENDS", True)
+ if use_cross_localedef == "1" :
+ depends = "%s cross-localedef-native" % depends
+ else:
+ depends = "%s qemu-native" % depends
+ d.setVar("DEPENDS", depends)
+ d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
+ break
+
+ distro_features = (d.getVar('DISTRO_FEATURES', True) or '').split()
+
+    # try to fix the compile failure seen when charsets/locales/locale-code are disabled
+ if 'libc-charsets' in distro_features and 'libc-locales' in distro_features and 'libc-locale-code' in distro_features:
+ d.setVar('PACKAGE_NO_GCONV', '0')
+ else:
+ d.setVar('PACKAGE_NO_GCONV', '1')
+}
+
+OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
+
+do_configure_prepend() {
+ if [ -e ${S}/elf/ldd.bash.in ]; then
+ sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
+ fi
+}
+
+
+
+# indentation removed on purpose
+locale_base_postinst() {
+#!/bin/sh
+
+if [ "x$D" != "x" ]; then
+ exit 1
+fi
+
+rm -rf ${TMP_LOCALE}
+mkdir -p ${TMP_LOCALE}
+if [ -f ${libdir}/locale/locale-archive ]; then
+ cp ${libdir}/locale/locale-archive ${TMP_LOCALE}/
+fi
+localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
+mkdir -p ${libdir}/locale/
+mv ${TMP_LOCALE}/locale-archive ${libdir}/locale/
+rm -rf ${TMP_LOCALE}
+}
+
+# indentation removed on purpose
+locale_base_postrm() {
+#!/bin/sh
+
+rm -rf ${TMP_LOCALE}
+mkdir -p ${TMP_LOCALE}
+if [ -f ${libdir}/locale/locale-archive ]; then
+ cp ${libdir}/locale/locale-archive ${TMP_LOCALE}/
+fi
+localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
+mv ${TMP_LOCALE}/locale-archive ${libdir}/locale/
+rm -rf ${TMP_LOCALE}
+}
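
Both templates rely on positional %s substitution; output_locale_source
(further down) fills them with (locale, encoding, locale). A tiny sketch with
hypothetical values:

    # The third %s is the locale name again, exactly as the class passes it.
    template = "localedef --inputfile=.../i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s"
    print(template % ("en_GB", "UTF-8", "en_GB"))
    # -> localedef --inputfile=.../i18n/locales/en_GB --charmap=UTF-8 --prefix=/tmp/locale en_GB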
+
+
+TMP_LOCALE="/tmp/locale${libdir}/locale"
+LOCALETREESRC ?= "${PKGD}"
+
+do_prep_locale_tree() {
+ treedir=${WORKDIR}/locale-tree
+ rm -rf $treedir
+ mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${libdir}/locale
+ tar -cf - -C ${LOCALETREESRC}${datadir} -ps i18n | tar -xf - -C $treedir/${datadir}
+    # gunzip the charmaps to avoid parsing errors
+ for i in $treedir/${datadir}/i18n/charmaps/*gz; do
+ gunzip $i
+ done
+ tar -cf - -C ${LOCALETREESRC}${base_libdir} -ps . | tar -xf - -C $treedir/${base_libdir}
+ if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
+ tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -ps libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
+ fi
+ install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
+}
+
+do_collect_bins_from_locale_tree() {
+ treedir=${WORKDIR}/locale-tree
+
+ mkdir -p ${PKGD}${libdir}
+ tar -cf - -C $treedir/${libdir} -ps locale | tar -xf - -C ${PKGD}${libdir}
+}
+
+inherit qemu
+
+python package_do_split_gconvs () {
+ import os, re
+ if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
+ bb.note("package requested not splitting gconvs")
+ return
+
+ if not d.getVar('PACKAGES', True):
+ return
+
+ mlprefix = d.getVar("MLPREFIX", True) or ""
+
+ bpn = d.getVar('BPN', True)
+ libdir = d.getVar('libdir', True)
+ if not libdir:
+ bb.error("libdir not defined")
+ return
+ datadir = d.getVar('datadir', True)
+ if not datadir:
+ bb.error("datadir not defined")
+ return
+
+ gconv_libdir = base_path_join(libdir, "gconv")
+ charmap_dir = base_path_join(datadir, "i18n", "charmaps")
+ locales_dir = base_path_join(datadir, "i18n", "locales")
+ binary_locales_dir = base_path_join(libdir, "locale")
+
+ def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "r")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name('%s%s-gconv-%s' % (mlprefix, bpn, m.group(1)))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
+ description='gconv module for character set %s', hook=calc_gconv_deps, \
+ extra_depends=bpn+'-gconv')
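
The hook above (and its charmap/locale siblings below) parses gconv sources
for 'copy'/'include' directives, turning every referenced name into a runtime
dependency. A standalone sketch with hypothetical input lines:

    import re

    c_re = re.compile('^copy "(.*)"')
    i_re = re.compile('^include "(\w+)".*')
    for l in ['copy "ISO-8859-1"', 'include "UTF8" % alias', 'width 2']:
        m = c_re.match(l) or i_re.match(l)
        if m:
            print(m.group(1))  # ISO-8859-1, then UTF8; 'width 2' never matches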
+
+ def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "r")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name('%s%s-charmap-%s' % (mlprefix, bpn, m.group(1)))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
+ description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
+
+ def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
+ deps = []
+ f = open(fn, "r")
+ c_re = re.compile('^copy "(.*)"')
+ i_re = re.compile('^include "(\w+)".*')
+ for l in f.readlines():
+ m = c_re.match(l) or i_re.match(l)
+ if m:
+ dp = legitimize_package_name(mlprefix+bpn+'-localedata-%s' % m.group(1))
+ if not dp in deps:
+ deps.append(dp)
+ f.close()
+ if deps != []:
+ d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
+ if bpn != 'glibc':
+ d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
+
+ do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
+ description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
+ d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv')
+
+ use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
+
+ dot_re = re.compile("(.*)\.(.*)")
+
+ # Read in supported locales and associated encodings
+ supported = {}
+ with open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED")) as f:
+ for line in f.readlines():
+ try:
+ locale, charset = line.rstrip().split()
+ except ValueError:
+ continue
+ supported[locale] = charset
+
+    # The GLIBC_GENERATE_LOCALES var specifies which locales are to be generated; empty or "all" means all locales
+ to_generate = d.getVar('GLIBC_GENERATE_LOCALES', True)
+ if not to_generate or to_generate == 'all':
+ to_generate = supported.keys()
+ else:
+ to_generate = to_generate.split()
+ for locale in to_generate:
+ if locale not in supported:
+ if '.' in locale:
+ charset = locale.split('.')[1]
+ else:
+ charset = 'UTF-8'
+ bb.warn("Unsupported locale '%s', assuming encoding '%s'" % (locale, charset))
+ supported[locale] = charset
+
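
The SUPPORTED file parsed above holds one "locale charset" pair per line, as
emitted by generate-supported.mk. A standalone sketch with hypothetical input:

    supported = {}
    for line in ["en_GB.UTF-8 UTF-8", "de_DE ISO-8859-1", "three field line"]:
        try:
            locale, charset = line.rstrip().split()
        except ValueError:
            continue  # anything that is not exactly two fields is skipped
        supported[locale] = charset
    print(supported)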
+ def output_locale_source(name, pkgname, locale, encoding):
+ d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
+ (mlprefix+bpn, legitimize_package_name(locale), mlprefix+bpn, legitimize_package_name(encoding)))
+ d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
+ % (locale, encoding, locale))
+ d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
+ (locale, encoding, locale))
+
+ def output_locale_binary_rdepends(name, pkgname, locale, encoding):
+ m = re.match("(.*)\.(.*)", name)
+ if m:
+ libc_name = "%s.%s" % (m.group(1), m.group(2).lower().replace("-",""))
+ else:
+ libc_name = name
+ d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
+ % (mlprefix+bpn, libc_name)))
+
+ commands = {}
+
+ def output_locale_binary(name, pkgname, locale, encoding):
+ treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
+ ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
+ path = d.getVar("PATH", True)
+ i18npath = base_path_join(treedir, datadir, "i18n")
+ gconvpath = base_path_join(treedir, "iconvdata")
+ outputpath = base_path_join(treedir, libdir, "locale")
+
+ use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
+ if use_cross_localedef == "1":
+ target_arch = d.getVar('TARGET_ARCH', True)
+ locale_arch_options = { \
+ "arm": " --uint32-align=4 --little-endian ", \
+ "powerpc": " --uint32-align=4 --big-endian ", \
+ "powerpc64": " --uint32-align=4 --big-endian ", \
+ "mips": " --uint32-align=4 --big-endian ", \
+ "mipsel": " --uint32-align=4 --little-endian ", \
+ "i586": " --uint32-align=4 --little-endian ", \
+ "i686": " --uint32-align=4 --little-endian ", \
+ "x86_64": " --uint32-align=4 --little-endian " }
+
+ if target_arch in locale_arch_options:
+ localedef_opts = locale_arch_options[target_arch]
+ else:
+ bb.error("locale_arch_options not found for target_arch=" + target_arch)
+ raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
+
+ localedef_opts += " --force --old-style --no-archive --prefix=%s \
+ --inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
+ % (treedir, treedir, datadir, locale, encoding, outputpath, name)
+
+ cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
+ (path, i18npath, gconvpath, localedef_opts)
+ else: # earlier slower qemu way
+ qemu = qemu_target_binary(d)
+ localedef_opts = "--force --old-style --no-archive --prefix=%s \
+ --inputfile=%s/i18n/locales/%s --charmap=%s %s" \
+ % (treedir, datadir, locale, encoding, name)
+
+ qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
+ if not qemu_options:
+ qemu_options = d.getVar('QEMU_OPTIONS', True)
+
+ cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
+ -E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
+ (path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
+
+ commands["%s/%s" % (outputpath, name)] = cmd
+
+ bb.note("generating locale %s (%s)" % (locale, encoding))
+
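
For an ARM target, the cross-localedef command assembled above comes out
roughly as follows (paths abbreviated, values hypothetical):

    PATH="..." I18NPATH=".../locale-tree/usr/share/i18n" GCONV_PATH=".../locale-tree/iconvdata" \
    cross-localedef --uint32-align=4 --little-endian --force --old-style --no-archive \
        --prefix=.../locale-tree --inputfile=.../locale-tree/usr/share/i18n/locales/en_GB \
        --charmap=UTF-8 .../locale-tree/usr/lib/locale/en_GB.UTF-8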
+ def output_locale(name, locale, encoding):
+ pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name)
+ d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
+ d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
+ rprovides = ' %svirtual-locale-%s' % (mlprefix, legitimize_package_name(name))
+ m = re.match("(.*)_(.*)", name)
+ if m:
+ rprovides += ' %svirtual-locale-%s' % (mlprefix, m.group(1))
+ d.setVar('RPROVIDES_%s' % pkgname, rprovides)
+
+ if use_bin == "compile":
+ output_locale_binary_rdepends(name, pkgname, locale, encoding)
+ output_locale_binary(name, pkgname, locale, encoding)
+ elif use_bin == "precompiled":
+ output_locale_binary_rdepends(name, pkgname, locale, encoding)
+ else:
+ output_locale_source(name, pkgname, locale, encoding)
+
+ if use_bin == "compile":
+ bb.note("preparing tree for binary locale generation")
+ bb.build.exec_func("do_prep_locale_tree", d)
+
+ utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
+ encodings = {}
+ for locale in to_generate:
+ charset = supported[locale]
+ if utf8_only and charset != 'UTF-8':
+ continue
+
+ m = dot_re.match(locale)
+ if m:
+ base = m.group(1)
+ else:
+ base = locale
+
+ # Precompiled locales are kept as is, obeying SUPPORTED, while
+ # others are adjusted, ensuring that the non-suffixed locales
+ # are utf-8, while the suffixed are not.
+ if use_bin == "precompiled":
+ output_locale(locale, base, charset)
+ else:
+ if charset == 'UTF-8':
+ output_locale(base, base, charset)
+ else:
+ output_locale('%s.%s' % (base, charset), base, charset)
+
+ if use_bin == "compile":
+ makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
+ m = open(makefile, "w")
+ m.write("all: %s\n\n" % " ".join(commands.keys()))
+ for cmd in commands:
+ m.write(cmd + ":\n")
+            m.write("\t" + commands[cmd] + "\n\n")  # make recipe lines must start with a tab
+ m.close()
+ d.setVar("B", os.path.dirname(makefile))
+ d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}")
+ bb.note("Executing binary locale generation makefile")
+ bb.build.exec_func("oe_runmake", d)
+ bb.note("collecting binary locales from locale tree")
+ bb.build.exec_func("do_collect_bins_from_locale_tree", d)
+ do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+ elif use_bin == "precompiled":
+ do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
+ output_pattern=bpn+'-binary-localedata-%s', \
+ description='binary locale definition for %s', extra_depends='', allow_dirs=True)
+ else:
+ bb.note("generation of binary locales disabled. this may break i18n!")
+
+}
+
+# We want to do this indirection so that we can safely 'return'
+# from the called function even though we're prepending
+python populate_packages_prepend () {
+ bb.build.exec_func('package_do_split_gconvs', d)
+}
+
diff --git a/meta/classes/license.bbclass b/meta/classes/license.bbclass
new file mode 100644
index 000000000..6c4a6739f
--- /dev/null
+++ b/meta/classes/license.bbclass
@@ -0,0 +1,390 @@
+# Populates LICENSE_DIRECTORY (as set in the distro config) with the license
+# files listed in LIC_FILES_CHKSUM.
+# TODO:
+# - There is a real issue revolving around license naming standards.
+
+LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
+LICSSTATEDIR = "${WORKDIR}/license-destdir/"
+
+addtask populate_lic after do_patch before do_package
+do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
+do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
+
+# Standards are great! Everyone has their own. In an effort to standardize licensing
+# names, common-licenses will use the SPDX standard license names. In order to not
+# break the non-standardized license names that we find in LICENSE, we'll set
+# up a bunch of VarFlags to accommodate non-SPDX license names.
+#
+# We should really discuss standardizing this field, but that's a longer term goal.
+# For now, we can do this and it should grab the most common LICENSE naming variations.
+#
+# We should NEVER have a GPL/LGPL without a version!!!!
+# Any mapping to MPL/LGPL/GPL should be fixed
+# see: https://wiki.yoctoproject.org/wiki/License_Audit
+
+# GPL variations
+SPDXLICENSEMAP[GPL-1] = "GPL-1.0"
+SPDXLICENSEMAP[GPLv1] = "GPL-1.0"
+SPDXLICENSEMAP[GPLv1.0] = "GPL-1.0"
+SPDXLICENSEMAP[GPL-2] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2] = "GPL-2.0"
+SPDXLICENSEMAP[GPLv2.0] = "GPL-2.0"
+SPDXLICENSEMAP[GPL-3] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3] = "GPL-3.0"
+SPDXLICENSEMAP[GPLv3.0] = "GPL-3.0"
+
+#LGPL variations
+SPDXLICENSEMAP[LGPLv2] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPLv2.0] = "LGPL-2.0"
+SPDXLICENSEMAP[LGPL2.1] = "LGPL-2.1"
+SPDXLICENSEMAP[LGPLv2.1] = "LGPL-2.1"
+SPDXLICENSEMAP[LGPLv3] = "LGPL-3.0"
+
+#MPL variations
+SPDXLICENSEMAP[MPL-1] = "MPL-1.0"
+SPDXLICENSEMAP[MPLv1] = "MPL-1.0"
+SPDXLICENSEMAP[MPLv1.1] = "MPL-1.1"
+
+#MIT variations
+SPDXLICENSEMAP[MIT-X] = "MIT"
+SPDXLICENSEMAP[MIT-style] = "MIT"
+
+#Openssl variations
+SPDXLICENSEMAP[openssl] = "OpenSSL"
+
+#Python variations
+SPDXLICENSEMAP[PSF] = "Python-2.0"
+SPDXLICENSEMAP[PSFv2] = "Python-2.0"
+SPDXLICENSEMAP[Python-2] = "Python-2.0"
+
+#Apache variations
+SPDXLICENSEMAP[Apachev2] = "Apache-2.0"
+SPDXLICENSEMAP[Apache-2] = "Apache-2.0"
+
+#Artistic variations
+SPDXLICENSEMAP[Artisticv1] = "Artistic-1.0"
+SPDXLICENSEMAP[Artistic-1] = "Artistic-1.0"
+
+#Academic variations
+SPDXLICENSEMAP[AFL-2] = "AFL-2.0"
+SPDXLICENSEMAP[AFL-1] = "AFL-1.2"
+SPDXLICENSEMAP[AFLv2] = "AFL-2.0"
+SPDXLICENSEMAP[AFLv1] = "AFL-1.2"
+
+#Other variations
+SPDXLICENSEMAP[EPLv1.0] = "EPL-1.0"
+
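
For example, hypothetical lookups against the varflags above, via the same
datastore API this class uses elsewhere:

    d.getVarFlag('SPDXLICENSEMAP', 'GPLv2')    # -> 'GPL-2.0'
    d.getVarFlag('SPDXLICENSEMAP', 'GPL-2.0')  # -> None, already an SPDX name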
+license_create_manifest() {
+ mkdir -p ${LICENSE_DIRECTORY}/${IMAGE_NAME}
+ # Get list of installed packages
+ list_installed_packages | grep -v "locale" |sort > ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest
+ INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest`
+ # remove existing license.manifest file
+ if [ -f ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest ]; then
+ rm ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ fi
+ # list of installed packages is broken for deb
+ for pkg in ${INSTALLED_PKGS}; do
+        # not the best way to do this, but licenses are not arch-dependent iirc
+ files=`find ${TMPDIR}/pkgdata/*/runtime -name ${pkg}| head -1`
+ for filename in $files; do
+ pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
+ pkged_lic="$(sed -n '/^LICENSE: /{ s/^LICENSE: //; s/[+|&()*]/ /g; s/ */ /g; p }' ${filename})"
+            # check to see if the package name already exists in the manifest; if so, bail.
+            if ! grep -q "PACKAGE NAME: ${pkg}" "${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest" 2>/dev/null; then
+                # exclude locale packages; a plain test won't glob, so use grep
+                if ! echo "${pkged_pn}" | grep -q "locale"; then
+ echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ echo "LICENSE: " >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ for lic in ${pkged_lic}; do
+ if [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic}" ]; then
+ echo ${lic}|sed s'/generic_//'g >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ else
+                            echo "WARNING: The license listed, ${lic}, was not in the licenses collected for ${pkged_pn}" >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ fi
+ done
+ echo "" >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
+ fi
+ fi
+ done
+ done
+
+ # Two options here:
+ # - Just copy the manifest
+ # - Copy the manifest and the license directories
+    # With both options set we see a 0.5 MB increase in core-image-minimal
+ if [ -n "${COPY_LIC_MANIFEST}" ]; then
+ mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/
+ cp ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest
+ if [ -n "${COPY_LIC_DIRS}" ]; then
+ for pkg in ${INSTALLED_PKGS}; do
+ mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
+ for lic in `ls ${LICENSE_DIRECTORY}/${pkg}`; do
+ # Really don't need to copy the generics as they're
+ # represented in the manifest and in the actual pkg licenses
+ # Doing so would make your image quite a bit larger
+                    # use case for the glob match; [[ ]] is a bashism under /bin/sh
+                    case "${lic}" in
+                    generic_*)
+                        if [ ! -f ${IMAGE_ROOTFS}/usr/share/common-licenses/${lic} ]; then
+                            cp ${LICENSE_DIRECTORY}/${pkg}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/
+                        fi
+                        ln -s ../${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
+                        ;;
+                    *)
+                        cp ${LICENSE_DIRECTORY}/${pkg}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
+                        ;;
+                    esac
+ done
+ done
+ fi
+ fi
+
+}
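
Each entry this function appends to license.manifest has the following shape
(hypothetical package shown):

    PACKAGE NAME: busybox
    RECIPE NAME: busybox
    LICENSE:
    GPLv2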
+
+python do_populate_lic() {
+ """
+ Populate LICENSE_DIRECTORY with licenses.
+ """
+ import os
+ import bb
+ import shutil
+ import oe.license
+
+ pn = d.getVar('PN', True)
+    license_types = None
+    for package in d.getVar('PACKAGES', True).split():
+        pkg_license = d.getVar('LICENSE_' + pn + '-' + package, True)
+        if pkg_license:
+            if license_types:
+                license_types = license_types + ' & ' + pkg_license
+            else:
+                license_types = pkg_license
+
+    # If we get here with no package-level license types, then we have a
+    # recipe-level license; if so, we grab only that.
+    if not license_types:
+        # All the license types at the recipe level
+        license_types = d.getVar('LICENSE', True)
+
+ # All the license files for the package
+ lic_files = d.getVar('LIC_FILES_CHKSUM', True)
+ pn = d.getVar('PN', True)
+ # The base directory we wrangle licenses to
+ destdir = os.path.join(d.getVar('LICSSTATEDIR', True), pn)
+ # The license files are located in S/LIC_FILE_CHECKSUM.
+ srcdir = d.getVar('S', True)
+ # Directory we store the generic licenses as set in the distro configuration
+ generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
+ license_source_dirs = []
+ license_source_dirs.append(generic_directory)
+ try:
+ additional_lic_dirs = d.getVar('LICENSE_DIR', True).split()
+ for lic_dir in additional_lic_dirs:
+ license_source_dirs.append(lic_dir)
+ except:
+ pass
+
+ class FindVisitor(oe.license.LicenseVisitor):
+ def visit_Str(self, node):
+            #
+            # Until I figure out what to do with the two modifiers I support
+            # ("or greater" = '+' and "with exceptions" = '*'),
+            # we'll just strip out the modifier and use the base license.
+ find_license(node.s.replace("+", "").replace("*", ""))
+ self.generic_visit(node)
+
+ def find_license(license_type):
+ try:
+ bb.mkdirhier(gen_lic_dest)
+ except:
+ pass
+ spdx_generic = None
+ license_source = None
+ # If the generic does not exist we need to check to see if there is an SPDX mapping to it
+ for lic_dir in license_source_dirs:
+ if not os.path.isfile(os.path.join(lic_dir, license_type)):
+ if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
+ # Great, there is an SPDXLICENSEMAP. We can copy!
+ bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
+ spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
+ license_source = lic_dir
+ break
+ elif os.path.isfile(os.path.join(lic_dir, license_type)):
+ spdx_generic = license_type
+ license_source = lic_dir
+ break
+
+ if spdx_generic and license_source:
+            # we really should copy to generic_ + spdx_generic; however, that ends up messing up
+            # the manifest audit. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
+
+ bb.copyfile(os.path.join(license_source, spdx_generic), os.path.join(os.path.join(d.getVar('LICSSTATEDIR', True), pn), "generic_" + license_type))
+ if not os.path.isfile(os.path.join(os.path.join(d.getVar('LICSSTATEDIR', True), pn), "generic_" + license_type)):
+ # If the copy didn't occur, something horrible went wrong and we fail out
+ bb.warn("%s for %s could not be copied for some reason. It may not exist. WARN for now." % (spdx_generic, pn))
+ else:
+ # And here is where we warn people that their licenses are lousy
+ bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
+ pass
+
+ try:
+ bb.mkdirhier(destdir)
+ except:
+ pass
+
+ if not generic_directory:
+ raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
+
+ if not lic_files:
+        # No recipe should have an invalid license file. This is checked
+        # elsewhere, but let's be pedantic
+ bb.note(pn + ": Recipe file does not have license file information.")
+ return True
+
+ for url in lic_files.split():
+ (type, host, path, user, pswd, parm) = bb.decodeurl(url)
+ # We want the license file to be copied into the destination
+ srclicfile = os.path.join(srcdir, path)
+ ret = bb.copyfile(srclicfile, os.path.join(destdir, os.path.basename(path)))
+ # If the copy didn't occur, something horrible went wrong and we fail out
+ if not ret:
+ bb.warn("%s could not be copied for some reason. It may not exist. WARN for now." % srclicfile)
+
+ v = FindVisitor()
+ try:
+ v.visit_string(license_types)
+ except oe.license.InvalidLicense as exc:
+ bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
+ except SyntaxError:
+        bb.warn("%s: Failed to parse its LICENSE field." % (d.getVar('PF', True)))
+
+}
+
+def return_spdx(d, license):
+ """
+ This function returns the spdx mapping of a license.
+ """
+    if d.getVarFlag('SPDXLICENSEMAP', license) != None:
+        return d.getVarFlag('SPDXLICENSEMAP', license)
+    else:
+        # no mapping entry: the name is assumed to already be an SPDX name
+        return license
+
+def incompatible_license(d, dont_want_license, package=""):
+ """
+    This function checks if a recipe has only incompatible licenses. It also
+    takes the 'or' operand into consideration.
+ """
+ import re
+ import oe.license
+ from fnmatch import fnmatchcase as fnmatch
+ pn = d.getVar('PN', True)
+ dont_want_licenses = []
+ dont_want_licenses.append(d.getVar('INCOMPATIBLE_LICENSE', True))
+ recipe_license = d.getVar('LICENSE', True)
+ if package != "":
+ if d.getVar('LICENSE_' + pn + '-' + package, True):
+ license = d.getVar('LICENSE_' + pn + '-' + package, True)
+ else:
+ license = recipe_license
+ else:
+ license = recipe_license
+ spdx_license = return_spdx(d, dont_want_license)
+ dont_want_licenses.append(spdx_license)
+
+ def include_license(license):
+ if any(fnmatch(license, pattern) for pattern in dont_want_licenses):
+ return False
+ else:
+ return True
+
+ def choose_licenses(a, b):
+ if all(include_license(lic) for lic in a):
+ return a
+ else:
+ return b
+
+    """
+    If we want to exclude a license named generically 'X', we surely want to exclude 'X+'
+    as well. In consequence, we strip the '+' character from LICENSE when
+    INCOMPATIBLE_LICENSE is not an 'X+' license.
+    """
+    if not re.search(r'[+]', dont_want_license):
+        licenses = oe.license.flattened_licenses(re.sub(r'[+]', '', license), choose_licenses)
+    else:
+        licenses = oe.license.flattened_licenses(license, choose_licenses)
+
+ for onelicense in licenses:
+ if not include_license(onelicense):
+ return True
+ return False
+
+def check_license_flags(d):
+ """
+ This function checks if a recipe has any LICENSE_FLAGs that
+ aren't whitelisted.
+
+ If it does, it returns the first LICENSE_FLAG missing from the
+ whitelist, or all the LICENSE_FLAGs if there is no whitelist.
+
+    If everything is properly whitelisted, it returns None.
+ """
+
+ def license_flag_matches(flag, whitelist, pn):
+ """
+        Return True if flag matches something in whitelist, False if not.
+
+ Before we test a flag against the whitelist, we append _${PN}
+ to it. We then try to match that string against the
+ whitelist. This covers the normal case, where we expect
+ LICENSE_FLAGS to be a simple string like 'commercial', which
+ the user typically matches exactly in the whitelist by
+        explicitly appending the package name e.g. 'commercial_foo'.
+ If we fail the match however, we then split the flag across
+ '_' and append each fragment and test until we either match or
+ run out of fragments.
+ """
+ flag_pn = ("%s_%s" % (flag, pn))
+ for candidate in whitelist:
+ if flag_pn == candidate:
+ return True
+
+ flag_cur = ""
+ flagments = flag_pn.split("_")
+ flagments.pop() # we've already tested the full string
+ for flagment in flagments:
+ if flag_cur:
+ flag_cur += "_"
+ flag_cur += flagment
+ for candidate in whitelist:
+ if flag_cur == candidate:
+ return True
+ return False
+
+ def all_license_flags_match(license_flags, whitelist):
+ """ Return first unmatched flag, None if all flags match """
+ pn = d.getVar('PN', True)
+ split_whitelist = whitelist.split()
+ for flag in license_flags.split():
+ if not license_flag_matches(flag, split_whitelist, pn):
+ return flag
+ return None
+
+ license_flags = d.getVar('LICENSE_FLAGS', True)
+ if license_flags:
+ whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
+ if not whitelist:
+ return license_flags
+ unmatched_flag = all_license_flags_match(license_flags, whitelist)
+ if unmatched_flag:
+ return unmatched_flag
+ return None
+
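
To see the matching order concretely, here is a standalone sketch
(hypothetical flag and PN) enumerating the candidates license_flag_matches
tests against the whitelist:

    def candidates(flag, pn):
        flag_pn = "%s_%s" % (flag, pn)
        cands = [flag_pn]          # the full string is tried first
        cur = ""
        for frag in flag_pn.split("_")[:-1]:   # last fragment already covered
            cur = cur + "_" + frag if cur else frag
            cands.append(cur)
        return cands

    print(candidates("commercial_gst", "foo"))
    # ['commercial_gst_foo', 'commercial', 'commercial_gst']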
+SSTATETASKS += "do_populate_lic"
+do_populate_lic[sstate-name] = "populate-lic"
+do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
+do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
+
+ROOTFS_POSTINSTALL_COMMAND += "license_create_manifest; "
+
+python do_populate_lic_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_lic_setscene
diff --git a/meta/classes/linux-kernel-base.bbclass b/meta/classes/linux-kernel-base.bbclass
index 4e2e2da37..4f2b0a4a9 100644
--- a/meta/classes/linux-kernel-base.bbclass
+++ b/meta/classes/linux-kernel-base.bbclass
@@ -1,9 +1,12 @@
# parse kernel ABI version out of <linux/version.h>
def get_kernelversion(p):
- import re, os
+ import re
fn = p + '/include/linux/utsrelease.h'
if not os.path.isfile(fn):
+ # after 2.6.33-rc1
+ fn = p + '/include/generated/utsrelease.h'
+ if not os.path.isfile(fn):
fn = p + '/include/linux/version.h'
import re
@@ -21,16 +24,7 @@ def get_kernelversion(p):
return m.group(1)
return None
-def get_kernelmajorversion(p):
- import re
- r = re.compile("([0-9]+\.[0-9]+).*")
- m = r.match(p);
- if m:
- return m.group(1)
- return None
-
def linux_module_packages(s, d):
- import bb, os.path
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
diff --git a/meta/classes/logging.bbclass b/meta/classes/logging.bbclass
new file mode 100644
index 000000000..78d65bda3
--- /dev/null
+++ b/meta/classes/logging.bbclass
@@ -0,0 +1,72 @@
+# The following logging mechanisms are to be used in bash functions of recipes.
+# They are intended to map one to one in intention and output format with the
+# python recipe logging functions of a similar naming convention: bb.plain(),
+# bb.note(), etc.
+#
+# For the time being, all of these print only to the task logs. Future
+# enhancements may integrate these calls with the bitbake logging
+# infrastructure, allowing for printing to the console as appropriate. The
+# interface and intention statements reflect that future goal. Once it is
+# in place, no changes will be necessary to recipes using these logging
+# mechanisms.
+
+# Print the output exactly as it is passed in. Typically used for output of
+# tasks that should be seen on the console. Use sparingly.
+# Output: logs console
+# NOTE: console output is not currently implemented.
+bbplain() {
+ echo "$*"
+}
+
+# Notify the user of a noteworthy condition.
+# Output: logs console
+# NOTE: console output is not currently implemented.
+bbnote() {
+ echo "NOTE: $*"
+}
+
+# Print a warning to the log. Warnings are non-fatal, and do not
+# indicate a build failure.
+# Output: logs
+bbwarn() {
+ echo "WARNING: $*"
+}
+
+# Print an error to the log. Errors are non-fatal in that the build can
+# continue, but they do indicate a build failure.
+# Output: logs
+bberror() {
+ echo "ERROR: $*"
+}
+
+# Print a fatal error to the log. Fatal errors indicate build failure
+# and halt the build, exiting with an error code.
+# Output: logs
+bbfatal() {
+ echo "ERROR: $*"
+ exit 1
+}
+
+# Print debug messages. These are appropriate for progress checkpoint
+# messages to the logs. Depending on the debug log level, they may also
+# go to the console.
+# Output: logs console
+# Usage: bbdebug 1 "first level debug message"
+# bbdebug 2 "second level debug message"
+# NOTE: console output is not currently implemented.
+bbdebug() {
+ USAGE='Usage: bbdebug [123] "message"'
+ if [ $# -lt 2 ]; then
+ bbfatal "$USAGE"
+ fi
+
+ # Strip off the debug level and ensure it is an integer
+ DBGLVL=$1; shift
+    if ! [[ "$DBGLVL" =~ ^[0-9]+$ ]]; then
+ bbfatal "$USAGE"
+ fi
+
+ # All debug output is printed to the logs
+ echo "DEBUG: $*"
+}
+
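
A hypothetical recipe fragment using the helpers above:

    do_install_append() {
        bbnote "installing extra files for ${PN}"
        bbdebug 2 "D is ${D}"
    }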
diff --git a/meta/classes/metadata_scm.bbclass b/meta/classes/metadata_scm.bbclass
new file mode 100644
index 000000000..62650be67
--- /dev/null
+++ b/meta/classes/metadata_scm.bbclass
@@ -0,0 +1,77 @@
+METADATA_BRANCH ?= "${@base_detect_branch(d)}"
+METADATA_REVISION ?= "${@base_detect_revision(d)}"
+
+def base_detect_revision(d):
+ path = base_get_scmbasepath(d)
+
+ scms = [base_get_metadata_git_revision, \
+ base_get_metadata_svn_revision]
+
+ for scm in scms:
+ rev = scm(path, d)
+        if rev != "<unknown>":
+ return rev
+
+ return "<unknown>"
+
+def base_detect_branch(d):
+ path = base_get_scmbasepath(d)
+
+ scms = [base_get_metadata_git_branch]
+
+ for scm in scms:
+ rev = scm(path, d)
+        if rev != "<unknown>":
+ return rev.strip()
+
+ return "<unknown>"
+
+def base_get_scmbasepath(d):
+ return d.getVar( 'COREBASE', True)
+
+def base_get_metadata_monotone_branch(path, d):
+ monotone_branch = "<unknown>"
+ try:
+ monotone_branch = file( "%s/_MTN/options" % path ).read().strip()
+ if monotone_branch.startswith( "database" ):
+ monotone_branch_words = monotone_branch.split()
+ monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
+ except:
+ pass
+ return monotone_branch
+
+def base_get_metadata_monotone_revision(path, d):
+ monotone_revision = "<unknown>"
+ try:
+ monotone_revision = file( "%s/_MTN/revision" % path ).read().strip()
+ if monotone_revision.startswith( "format_version" ):
+ monotone_revision_words = monotone_revision.split()
+ monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
+ except IOError:
+ pass
+ return monotone_revision
+
+def base_get_metadata_svn_revision(path, d):
+ revision = "<unknown>"
+ try:
+ revision = file( "%s/.svn/entries" % path ).readlines()[3].strip()
+ except IOError:
+ pass
+ return revision
+
+def base_get_metadata_git_branch(path, d):
+ branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
+
+ if len(branch) != 0:
+ return branch
+ return "<unknown>"
+
+def base_get_metadata_git_revision(path, d):
+ f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
+ data = f.read()
+ if f.close() is None:
+ rev = data.split(" ")[0]
+ if len(rev) != 0:
+ return rev
+ return "<unknown>"
+
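
os.popen is what the class itself uses; a hypothetical modernization with
subprocess, keeping the same behavior (first field of the latest oneline log
entry, "<unknown>" on any failure), would be:

    import subprocess

    def git_revision(path):
        try:
            out = subprocess.check_output(
                ["git", "log", "-n", "1", "--pretty=oneline"],
                cwd=path, stderr=subprocess.STDOUT)
        except (OSError, subprocess.CalledProcessError):
            return "<unknown>"
        rev = out.decode(errors="replace").split(" ")[0]
        return rev if rev else "<unknown>"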
diff --git a/meta/classes/mime.bbclass b/meta/classes/mime.bbclass
new file mode 100644
index 000000000..6302747dc
--- /dev/null
+++ b/meta/classes/mime.bbclass
@@ -0,0 +1,60 @@
+DEPENDS += "shared-mime-info-native shared-mime-info"
+
+EXTRA_OECONF += "--disable-update-mimedb"
+
+mime_postinst() {
+if [ "$1" = configure ]; then
+ UPDATEMIMEDB=`which update-mime-database`
+ if [ -x "$UPDATEMIMEDB" ] ; then
+ echo "Updating MIME database... this may take a while."
+ $UPDATEMIMEDB $D${datadir}/mime
+ else
+ echo "Missing update-mime-database, update of mime database failed!"
+ exit 1
+ fi
+fi
+}
+
+mime_postrm() {
+if [ "$1" = remove ] || [ "$1" = upgrade ]; then
+ UPDATEMIMEDB=`which update-mime-database`
+ if [ -x "$UPDATEMIMEDB" ] ; then
+ echo "Updating MIME database... this may take a while."
+ $UPDATEMIMEDB $D${datadir}/mime
+ else
+ echo "Missing update-mime-database, update of mime database failed!"
+ exit 1
+ fi
+fi
+}
+
+python populate_packages_append () {
+ import re
+ packages = d.getVar('PACKAGES', True).split()
+ pkgdest = d.getVar('PKGDEST', True)
+
+ for pkg in packages:
+ mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
+ mimes = []
+ mime_re = re.compile(".*\.xml$")
+ if os.path.exists(mime_dir):
+ for f in os.listdir(mime_dir):
+ if mime_re.match(f):
+ mimes.append(f)
+ if mimes:
+ bb.note("adding mime postinst and postrm scripts to %s" % pkg)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += d.getVar('mime_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
+ if not postrm:
+ postrm = '#!/bin/sh\n'
+ postrm += d.getVar('mime_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
+ bb.note("adding shared-mime-info-data dependency to %s" % pkg)
+ rdepends = explode_deps(d.getVar('RDEPENDS_' + pkg, False) or d.getVar('RDEPENDS', False) or "" )
+ rdepends.append("shared-mime-info-data")
+ d.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends))
+}
diff --git a/meta/classes/mirrors.bbclass b/meta/classes/mirrors.bbclass
new file mode 100644
index 000000000..86ccd7a76
--- /dev/null
+++ b/meta/classes/mirrors.bbclass
@@ -0,0 +1,66 @@
+MIRRORS += "\
+${DEBIAN_MIRROR}/main http://snapshot.debian.org/archive/debian-archive/20120328T092752Z/debian/pool/ \n \
+${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
+${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
+${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
+${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
+ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
+ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
+ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
+ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
+ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnutls.org/pub/gnutls/ \n \
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
+ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \
+ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \
+ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \
+http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
+http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
+ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
+http://www.apache.org/dist http://archive.apache.org/dist \n \
+cvs://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+svn://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+hg://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+bzr://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+svk://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+p4://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+osc://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+ftp://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
+cvs://.*/.* http://sources.openembedded.org/ \n \
+svn://.*/.* http://sources.openembedded.org/ \n \
+git://.*/.* http://sources.openembedded.org/ \n \
+hg://.*/.* http://sources.openembedded.org/ \n \
+bzr://.*/.* http://sources.openembedded.org/ \n \
+svk://.*/.* http://sources.openembedded.org/ \n \
+p4://.*/.* http://sources.openembedded.org/ \n \
+osc://.*/.* http://sources.openembedded.org/ \n \
+https?$://.*/.* http://sources.openembedded.org/ \n \
+ftp://.*/.* http://sources.openembedded.org/ \n \
+"
diff --git a/meta/classes/module-base.bbclass b/meta/classes/module-base.bbclass
index c98baceea..9379bf87f 100644
--- a/meta/classes/module-base.bbclass
+++ b/meta/classes/module-base.bbclass
@@ -6,8 +6,7 @@ export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
-export KERNEL_SOURCE = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-source')}"
-KERNEL_OBJECT_SUFFIX = "${@[".o", ".ko"][base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion') > "2.6.0"]}"
+KERNEL_OBJECT_SUFFIX = ".ko"
KERNEL_CCSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ccsuffix')}"
KERNEL_LDSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ldsuffix')}"
KERNEL_ARSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-arsuffix')}"
@@ -22,8 +21,8 @@ TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
-KERNEL_LD = "${LD}${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
-KERNEL_AR = "${AR}${KERNEL_ARSUFFIX} ${HOST_AR_KERNEL_ARCH}"
+KERNEL_LD = "${HOST_PREFIX}ld${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
+KERNEL_AR = "${HOST_PREFIX}ar${KERNEL_ARSUFFIX} ${HOST_AR_KERNEL_ARCH}"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
diff --git a/meta/classes/module.bbclass b/meta/classes/module.bbclass
index d16d46208..91628e429 100644
--- a/meta/classes/module.bbclass
+++ b/meta/classes/module.bbclass
@@ -3,6 +3,21 @@ DEPENDS += "virtual/kernel"
inherit module-base
+#
+# Ensure the hostprogs are available for module compilation. Modules that
+# inherit this class and override do_compile() should be sure to call
+# do_make_scripts() or ensure the scripts are built independently.
+#
+do_make_scripts() {
+ unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
+ oe_runmake CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
+ -C ${STAGING_KERNEL_DIR} scripts
+}
+
+addtask make_scripts before do_compile
+do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
+do_make_scripts[deptask] = "do_populate_sysroot"
+
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
@@ -15,19 +30,21 @@ module_do_compile() {
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
- oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" CC="${KERNEL_CC}" LD="${KERNEL_LD}" modules_install
+ oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
+ KERNEL_SRC=${STAGING_KERNEL_DIR} \
+ CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
+ modules_install
}
pkg_postinst_append () {
- if [ -n "$D" ]; then
- exit 1
- fi
+if [ -z "$D" ]; then
depmod -a
update-modules || true
+fi
}
pkg_postrm_append () {
- update-modules || true
+update-modules || true
}
EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/module_strip.bbclass b/meta/classes/module_strip.bbclass
index 63e656979..e69de29bb 100644
--- a/meta/classes/module_strip.bbclass
+++ b/meta/classes/module_strip.bbclass
@@ -1,22 +0,0 @@
-#DEPENDS_append = " module-strip"
-
-do_strip_modules () {
- for p in ${PACKAGES}; do
- if test -e ${WORKDIR}/install/$p/lib/modules; then
- modules="`find ${WORKDIR}/install/$p/lib/modules -name \*${KERNEL_OBJECT_SUFFIX}`"
- if [ -n "$modules" ]; then
- for module in $modules ; do
- if ! [ -d "$module" ] ; then
- ${STRIP} -v -g $module
- fi
- done
-# NM="${CROSS_DIR}/bin/${HOST_PREFIX}nm" OBJCOPY="${CROSS_DIR}/bin/${HOST_PREFIX}objcopy" strip_module $modules
- fi
- fi
- done
-}
-
-python do_package_append () {
- if (bb.data.getVar('INHIBIT_PACKAGE_STRIP', d, 1) != '1'):
- bb.build.exec_func('do_strip_modules', d)
-}
diff --git a/meta/classes/mozilla.bbclass b/meta/classes/mozilla.bbclass
deleted file mode 100644
index d7ec74742..000000000
--- a/meta/classes/mozilla.bbclass
+++ /dev/null
@@ -1,53 +0,0 @@
-SECTION = "x11/utils"
-DEPENDS += "gnu-config-native virtual/libintl xt libxi \
- zip-native gtk+"
-LICENSE = "MPL NPL"
-SRC_URI += "file://mozconfig"
-
-inherit gettext pkgconfig
-
-EXTRA_OECONF = "--target=${TARGET_SYS} --host=${BUILD_SYS} \
- --build=${BUILD_SYS} --prefix=${prefix}"
-EXTRA_OEMAKE = "'HOST_LIBIDL_LIBS=${HOST_LIBIDL_LIBS}' \
- 'HOST_LIBIDL_CFLAGS=${HOST_LIBIDL_CFLAGS}'"
-SELECTED_OPTIMIZATION = "-Os -fsigned-char -fno-strict-aliasing"
-
-export CROSS_COMPILE = "1"
-export MOZCONFIG = "${WORKDIR}/mozconfig"
-export MOZ_OBJDIR = "${S}"
-
-export CONFIGURE_ARGS = "${EXTRA_OECONF}"
-export HOST_LIBIDL_CFLAGS = "`${HOST_LIBIDL_CONFIG} --cflags`"
-export HOST_LIBIDL_LIBS = "`${HOST_LIBIDL_CONFIG} --libs`"
-export HOST_LIBIDL_CONFIG = "PKG_CONFIG_SYSROOT_DIR="" PKG_CONFIG_PATH=${STAGING_LIBDIR_NATIVE}/pkgconfig pkg-config libIDL-2.0"
-export HOST_CC = "${BUILD_CC}"
-export HOST_CXX = "${BUILD_CXX}"
-export HOST_CFLAGS = "${BUILD_CFLAGS}"
-export HOST_CXXFLAGS = "${BUILD_CXXFLAGS}"
-export HOST_LDFLAGS = "${BUILD_LDFLAGS}"
-export HOST_RANLIB = "${BUILD_RANLIB}"
-export HOST_AR = "${BUILD_AR}"
-
-mozilla_do_configure() {
- (
- set -e
- for cg in `find ${S} -name config.guess`; do
- install -m 0755 \
- ${STAGING_DATADIR_NATIVE}/gnu-config/config.guess \
- ${STAGING_DATADIR_NATIVE}/gnu-config/config.sub \
- `dirname $cg`/
- done
- )
- oe_runmake -f client.mk ${MOZ_OBJDIR}/Makefile \
- ${MOZ_OBJDIR}/config.status
-}
-
-mozilla_do_compile() {
- oe_runmake -f client.mk build_all
-}
-
-mozilla_do_install() {
- oe_runmake DESTDIR="${D}" destdir="${D}" install
-}
-
-EXPORT_FUNCTIONS do_configure do_compile do_install
diff --git a/meta/classes/multilib.bbclass b/meta/classes/multilib.bbclass
new file mode 100644
index 000000000..f3e7618c9
--- /dev/null
+++ b/meta/classes/multilib.bbclass
@@ -0,0 +1,99 @@
+python multilib_virtclass_handler () {
+ if not isinstance(e, bb.event.RecipePreFinalise):
+ return
+
+ cls = e.data.getVar("BBEXTENDCURR", True)
+ variant = e.data.getVar("BBEXTENDVARIANT", True)
+ if cls != "multilib" or not variant:
+ return
+
+ # There should only be one kernel in multilib configs
+ if bb.data.inherits_class('kernel', e.data) or bb.data.inherits_class('module-base', e.data):
+ raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
+
+ if bb.data.inherits_class('image', e.data):
+ e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ return
+
+ save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
+ for name in save_var_name.split():
+ val=e.data.getVar(name, True)
+ if val:
+ e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
+
+ # Expand this since this won't work correctly once we set a multilib into place
+ e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+
+ override = ":virtclass-multilib-" + variant
+
+ e.data.setVar("MLPREFIX", variant + "-")
+ e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
+ e.data.setVar("SHLIBSDIR_virtclass-multilib-" + variant ,e.data.getVar("SHLIBSDIR", False) + "/" + variant)
+ if e.data.getVar("TARGET_VENDOR_virtclass-multilib-" + variant, False) is None:
+ e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + variant, e.data.getVar("TARGET_VENDOR", False) + "ml" + variant)
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
+}
+
+addhandler multilib_virtclass_handler
+
+STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
+
+python __anonymous () {
+ variant = d.getVar("BBEXTENDVARIANT", True)
+
+ import oe.classextend
+
+ clsextend = oe.classextend.ClassExtender(variant, d)
+
+ if bb.data.inherits_class('image', d):
+ clsextend.map_depends_variable("PACKAGE_INSTALL")
+ clsextend.map_depends_variable("LINGUAS_INSTALL")
+ clsextend.map_depends_variable("RDEPENDS")
+ pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
+ d.setVar("PACKAGE_INSTALL", pinstall)
+ d.setVar("LINGUAS_INSTALL", "")
+ # FIXME, we need to map this to something, not delete it!
+ d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
+ return
+
+ clsextend.rename_packages()
+ clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
+
+ clsextend.map_depends_variable("DEPENDS")
+ clsextend.map_packagevars()
+ clsextend.map_variable("PROVIDES")
+ clsextend.map_variable("PACKAGES_DYNAMIC")
+ clsextend.map_variable("PACKAGE_INSTALL")
+ clsextend.map_variable("INITSCRIPT_PACKAGES")
+}
+
+PACKAGEFUNCS_append = " do_package_qa_multilib"
+
+python do_package_qa_multilib() {
+
+ def check_mlprefix(pkg, var, mlprefix):
+ values = bb.utils.explode_dep_versions(d.getVar('%s_%s' % (var, pkg), True) or d.getVar(var, True) or "")
+ candidates = []
+ for i in values.keys():
+ if i.startswith('virtual/'):
+ i = i[len('virtual/'):]
+ if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)):
+ candidates.append(i)
+ if len(candidates) > 0:
+ bb.warn("Multilib QA Issue: %s package %s - suspicious values '%s' in %s"
+ % (d.getVar('PN', True), pkg, ' '.join(candidates), var))
+
+ ml = d.getVar('MLPREFIX', True)
+ if not ml:
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ for pkg in packages.split():
+ check_mlprefix(pkg, 'RDEPENDS', ml)
+ check_mlprefix(pkg, 'RPROVIDES', ml)
+ check_mlprefix(pkg, 'RRECOMMENDS', ml)
+ check_mlprefix(pkg, 'RSUGGESTS', ml)
+ check_mlprefix(pkg, 'RREPLACES', ml)
+ check_mlprefix(pkg, 'RCONFLICTS', ml)
+}
+
diff --git a/meta/classes/multilib_global.bbclass b/meta/classes/multilib_global.bbclass
new file mode 100644
index 000000000..b76fd9416
--- /dev/null
+++ b/meta/classes/multilib_global.bbclass
@@ -0,0 +1,38 @@
+python multilib_virtclass_handler_global () {
+ if not e.data:
+ return
+
+ variant = e.data.getVar("BBEXTENDVARIANT", True)
+
+ if isinstance(e, bb.event.RecipeParsed) and not variant:
+ if bb.data.inherits_class('kernel', e.data) or bb.data.inherits_class('module-base', e.data):
+ variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
+
+ import oe.classextend
+ clsextends = []
+ for variant in variants:
+ clsextends.append(oe.classextend.ClassExtender(variant, e.data))
+
+ # Process PROVIDES
+ origprovs = provs = e.data.getVar("PROVIDES", True) or ""
+ for clsextend in clsextends:
+ provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
+ e.data.setVar("PROVIDES", provs)
+
+ # Process RPROVIDES
+ origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
+ for clsextend in clsextends:
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
+ e.data.setVar("RPROVIDES", rprovs)
+
+ # Process RPROVIDES_${PN}...
+ for pkg in (e.data.getVar("PACKAGES", True) or "").split():
+ origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
+ for clsextend in clsextends:
+ rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
+ rprovs = rprovs + " " + clsextend.extname + "-" + pkg
+ e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
+}
+
+addhandler multilib_virtclass_handler_global
+
diff --git a/meta/classes/multilib_header.bbclass b/meta/classes/multilib_header.bbclass
new file mode 100644
index 000000000..867bce413
--- /dev/null
+++ b/meta/classes/multilib_header.bbclass
@@ -0,0 +1,29 @@
+inherit siteinfo
+
+# If applicable on the architecture, this routine will rename the header and add
+# a unique identifier to the name for the ABI/bitsize that is being used. A wrapper will
+# be generated for the architecture that knows how to call all of the ABI variants for that
+# given architecture.
+#
+# TODO: mips64 n32 is not yet recognized in this code;
+# when it is identified, the name of the wrapped item should be "n32" and
+# determined appropriately in the if coding below...
+#
+oe_multilib_header() {
+ # Do nothing on ARM, only one ABI is supported at once
+ if echo ${TARGET_ARCH} | grep -q arm; then
+ return
+ fi
+ for each_header in "$@" ; do
+ if [ ! -f "${D}/${includedir}/$each_header" ]; then
+ bberror "oe_multilib_header: Unable to find header $each_header."
+ continue
+ fi
+ stem=$(echo $each_header | sed 's#\.h$##')
+ ident=${SITEINFO_BITS}
+ # if mips64/n32 set ident to n32
+ mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
+
+ sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
+ done
+}
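
For a hypothetical header foo.h on a 32-bit multilib, the routine above
yields:

    ${includedir}/foo.h     the generated wrapper, selecting a variant at compile time
    ${includedir}/foo-32.h  the original header, suffixed with SITEINFO_BITS

A 64-bit build of the same recipe contributes foo-64.h alongside, and the
shared wrapper dispatches between them.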
diff --git a/meta/classes/native.bbclass b/meta/classes/native.bbclass
index 98b62a054..bca48d463 100644
--- a/meta/classes/native.bbclass
+++ b/meta/classes/native.bbclass
@@ -1,13 +1,19 @@
+# We want native packages to be relocatable
+inherit relocatable
+
# Native packages are built indirectly via dependency,
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
PACKAGES = ""
+PACKAGES_virtclass-native = ""
+PACKAGES_DYNAMIC = ""
+PACKAGES_DYNAMIC_virtclass-native = ""
PACKAGE_ARCH = "${BUILD_ARCH}"
-BASE_PACKAGE_ARCH = "${BUILD_ARCH}"
-BASEPKG_HOST_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
-BASEPKG_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
+# used by cmake class
+OECMAKE_RPATH = "${libdir}"
+OECMAKE_RPATH_virtclass-native = "${libdir}"
# When this class has packaging enabled, setting
# RPROVIDES becomes unnecessary.
@@ -18,12 +24,17 @@ TARGET_OS = "${BUILD_OS}"
TARGET_VENDOR = "${BUILD_VENDOR}"
TARGET_PREFIX = "${BUILD_PREFIX}"
TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
+TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
+TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
+TARGET_FPU = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_OS = "${BUILD_OS}"
HOST_VENDOR = "${BUILD_VENDOR}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
+HOST_LD_ARCH = "${BUILD_LD_ARCH}"
+HOST_AS_ARCH = "${BUILD_AS_ARCH}"
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
@@ -34,6 +45,11 @@ LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
+# native pkg doesn't need the TOOLCHAIN_OPTIONS.
+TOOLCHAIN_OPTIONS = ""
+
+DEPENDS_GETTEXT = "gettext-native"
+
# Don't use site files for native builds
export CONFIG_SITE = ""
@@ -41,94 +57,104 @@ export CONFIG_SITE = ""
export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
export F77 = "${CCACHE}${HOST_PREFIX}g77 ${HOST_CC_ARCH}"
-export CPP = "${HOST_PREFIX}gcc -E"
-export LD = "${HOST_PREFIX}ld"
+export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
+export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
export CCLD = "${CC}"
export AR = "${HOST_PREFIX}ar"
-export AS = "${HOST_PREFIX}as"
+export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
export RANLIB = "${HOST_PREFIX}ranlib"
export STRIP = "${HOST_PREFIX}strip"
# Path prefixes
-export base_prefix = "${STAGING_DIR_NATIVE}"
-export prefix = "${STAGING_DIR_NATIVE}${layout_prefix}"
-export exec_prefix = "${STAGING_DIR_NATIVE}${layout_exec_prefix}"
+base_prefix = "${STAGING_DIR_NATIVE}"
+prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
+exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
-# Base paths
-export base_bindir = "${STAGING_DIR_NATIVE}${layout_base_bindir}"
-export base_sbindir = "${STAGING_DIR_NATIVE}${layout_base_sbindir}"
-export base_libdir = "${STAGING_DIR_NATIVE}${layout_base_libdir}"
+libdir = "${STAGING_DIR_NATIVE}${libdir_native}"
-# Architecture independent paths
-export datadir = "${STAGING_DIR_NATIVE}${layout_datadir}"
-export sysconfdir = "${STAGING_DIR_NATIVE}${layout_sysconfdir}"
-export sharedstatedir = "${STAGING_DIR_NATIVE}${layout_sharedstatedir}"
-export localstatedir = "${STAGING_DIR_NATIVE}${layout_localstatedir}"
-export infodir = "${STAGING_DIR_NATIVE}${layout_infodir}"
-export mandir = "${STAGING_DIR_NATIVE}${layout_mandir}"
-export docdir = "${STAGING_DIR_NATIVE}${layout_docdir}"
-export servicedir = "${STAGING_DIR_NATIVE}${layout_servicedir}"
+baselib = "lib"
-# Architecture dependent paths
-export bindir = "${STAGING_DIR_NATIVE}${layout_bindir}"
-export sbindir = "${STAGING_DIR_NATIVE}${layout_sbindir}"
-export libexecdir = "${STAGING_DIR_NATIVE}${layout_libexecdir}"
-export libdir = "${STAGING_DIR_NATIVE}${layout_libdir}"
-export includedir = "${STAGING_DIR_NATIVE}${layout_includedir}"
-export oldincludedir = "${STAGING_DIR_NATIVE}${layout_includedir}"
+# Libtool's default paths are correct for the native machine
+lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
-do_stage () {
- if [ "${INHIBIT_NATIVE_STAGE_INSTALL}" != "1" ]
- then
- # If autotools is active, use the autotools staging function, else
- # use our "make install" equivalent
- if [ "${AUTOTOOLS_NATIVE_STAGE_INSTALL}" != "1" ]
- then
- oe_runmake install
- else
- autotools_stage_all
- fi
- fi
-}
+NATIVE_PACKAGE_PATH_SUFFIX ?= ""
+bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
-do_install () {
- true
-}
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}"
+
+# Since we actually install these in situ there is no staging prefix
+STAGING_DIR_HOST = ""
+STAGING_DIR_TARGET = ""
+SHLIBSDIR = "${STAGING_DIR_NATIVE}/shlibs"
+PKG_CONFIG_DIR = "${libdir}/pkgconfig"
+EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
PKG_CONFIG_SYSROOT_DIR = ""
-python __anonymous () {
- pn = bb.data.getVar("PN", d, True)
- depends = bb.data.getVar("DEPENDS", d, True)
- deps = bb.utils.explode_deps(depends)
- if "native" in (bb.data.getVar('BBCLASSEXTEND', d, True) or ""):
- autoextend = True
- else:
- autoextend = False
- for dep in deps:
- if dep.endswith("-cross"):
- if autoextend:
- depends = depends.replace(dep, dep.replace("-cross", "-native"))
- else:
- bb.note("%s has depends %s which ends in -cross?" % (pn, dep))
+# we don't want libc-uclibc or libc-glibc to kick in for native recipes
+LIBCOVERRIDE = ""
+CLASSOVERRIDE = "class-native"
+
+PATH =. "${COREBASE}/scripts/native-intercept:"
- if not dep.endswith("-native"):
- if autoextend:
- depends = depends.replace(dep, dep + "-native")
+python native_virtclass_handler () {
+ if not isinstance(e, bb.event.RecipePreFinalise):
+ return
+
+ classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
+ if "native" not in classextend:
+ return
+
+ pn = e.data.getVar("PN", True)
+ if not pn.endswith("-native"):
+ return
+
+ def map_dependencies(varname, d, suffix = ""):
+ if suffix:
+ varname = varname + "_" + suffix
+ deps = d.getVar(varname, True)
+ if not deps:
+ return
+ deps = bb.utils.explode_deps(deps)
+ newdeps = []
+ for dep in deps:
+ if dep.endswith("-cross"):
+ newdeps.append(dep.replace("-cross", "-native"))
+ elif not dep.endswith("-native"):
+ newdeps.append(dep + "-native")
else:
- bb.note("%s has depends %s which doesn't end in -native?" % (pn, dep))
- bb.data.setVar("DEPENDS", depends, d)
- provides = bb.data.getVar("PROVIDES", d, True)
+ newdeps.append(dep)
+ d.setVar(varname, " ".join(newdeps))
+
+ map_dependencies("DEPENDS", e.data)
+ for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
+ map_dependencies("RDEPENDS", e.data, pkg)
+ map_dependencies("RRECOMMENDS", e.data, pkg)
+ map_dependencies("RSUGGESTS", e.data, pkg)
+ map_dependencies("RPROVIDES", e.data, pkg)
+ map_dependencies("RREPLACES", e.data, pkg)
+
+ provides = e.data.getVar("PROVIDES", True)
for prov in provides.split():
if prov.find(pn) != -1:
continue
if not prov.endswith("-native"):
- if autoextend:
- provides = provides.replace(prov, prov + "-native")
- #else:
- # bb.note("%s has rouge PROVIDES of %s which doesn't end in -sdk?" % (pn, prov))
- bb.data.setVar("PROVIDES", provides, d)
- bb.data.setVar("OVERRIDES", bb.data.getVar("OVERRIDES", d, False) + ":virtclass-native", d)
+ provides = provides.replace(prov, prov + "-native")
+ e.data.setVar("PROVIDES", provides)
+
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
}
+addhandler native_virtclass_handler
+
+do_package[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
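
For reference, the dependency rewrite performed by map_dependencies in native_virtclass_handler above is easiest to see on a concrete value. A standalone sketch of the same rules (hypothetical package names; the real code also copes with versioned dependencies via bb.utils.explode_deps):

    def to_native(depends):
        # -cross tools become -native; anything else gains a -native suffix
        newdeps = []
        for dep in depends.split():
            if dep.endswith("-cross"):
                newdeps.append(dep.replace("-cross", "-native"))
            elif not dep.endswith("-native"):
                newdeps.append(dep + "-native")
            else:
                newdeps.append(dep)
        return " ".join(newdeps)

    print(to_native("gcc-cross zlib quilt-native"))
    # -> gcc-native zlib-native quilt-native
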
diff --git a/meta/classes/nativesdk.bbclass b/meta/classes/nativesdk.bbclass
new file mode 100644
index 000000000..a58fce204
--- /dev/null
+++ b/meta/classes/nativesdk.bbclass
@@ -0,0 +1,111 @@
+inherit relocatable
+
+# SDK packages are built either explicitly by the user,
+# or indirectly via dependency. No need to be in 'world'.
+EXCLUDE_FROM_WORLD = "1"
+
+STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
+
+# we don't want libc-uclibc or libc-glibc to kick in for nativesdk recipes
+LIBCOVERRIDE = ""
+CLASSOVERRIDE = "class-nativesdk"
+
+#
+# Update PACKAGE_ARCH and PACKAGE_ARCHS
+#
+PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
+PACKAGE_ARCHS = "${SDK_PACKAGE_ARCHS}"
+
+STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
+STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
+
+HOST_ARCH = "${SDK_ARCH}"
+HOST_VENDOR = "${SDK_VENDOR}"
+HOST_OS = "${SDK_OS}"
+HOST_PREFIX = "${SDK_PREFIX}"
+HOST_CC_ARCH = "${SDK_CC_ARCH}"
+HOST_LD_ARCH = "${SDK_LD_ARCH}"
+HOST_AS_ARCH = "${SDK_AS_ARCH}"
+#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
+
+TARGET_ARCH = "${SDK_ARCH}"
+TARGET_VENDOR = "${SDK_VENDOR}"
+TARGET_OS = "${SDK_OS}"
+TARGET_PREFIX = "${SDK_PREFIX}"
+TARGET_CC_ARCH = "${SDK_CC_ARCH}"
+TARGET_LD_ARCH = "${SDK_LD_ARCH}"
+TARGET_AS_ARCH = "${SDK_AS_ARCH}"
+TARGET_FPU = ""
+
+CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
+CFLAGS = "${BUILDSDK_CFLAGS}"
+CXXFLAGS = "${BUILDSDK_CFLAGS}"
+LDFLAGS = "${BUILDSDK_LDFLAGS}"
+
+# Change to place files in SDKPATH
+base_prefix = "${SDKPATHNATIVE}"
+prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
+baselib = "lib"
+
+export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
+export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
+
+PKGSUFFIX = "-nativesdk"
+
+python nativesdk_virtclass_handler () {
+ if not isinstance(e, bb.event.RecipePreFinalise):
+ return
+
+ pn = e.data.getVar("PN", True)
+ if not pn.endswith("-nativesdk"):
+ return
+
+ e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
+}
+
+python () {
+ pn = d.getVar("PN", True)
+ if not pn.endswith("-nativesdk"):
+ return
+
+ def map_dependencies(varname, d, suffix = ""):
+ if suffix:
+ varname = varname + "_" + suffix
+ deps = d.getVar(varname, True)
+ if not deps:
+ return
+ deps = bb.utils.explode_deps(deps)
+ newdeps = []
+ for dep in deps:
+ if dep.endswith("-native") or dep.endswith("-cross"):
+ newdeps.append(dep)
+ elif dep.endswith("-gcc-intermediate") or dep.endswith("-gcc-initial") or dep.endswith("-gcc") or dep.endswith("-g++"):
+ newdeps.append(dep + "-crosssdk")
+ elif not dep.endswith("-nativesdk"):
+ newdeps.append(dep.replace("-nativesdk", "") + "-nativesdk")
+ else:
+ newdeps.append(dep)
+ d.setVar(varname, " ".join(newdeps))
+
+ map_dependencies("DEPENDS", d)
+ #for pkg in (d.getVar("PACKAGES", True).split() + [""]):
+ # map_dependencies("RDEPENDS", d, pkg)
+ # map_dependencies("RRECOMMENDS", d, pkg)
+ # map_dependencies("RSUGGESTS", d, pkg)
+ # map_dependencies("RPROVIDES", d, pkg)
+ # map_dependencies("RREPLACES", d, pkg)
+
+ provides = d.getVar("PROVIDES", True)
+ for prov in provides.split():
+ if prov.find(pn) != -1:
+ continue
+ if not prov.endswith("-nativesdk"):
+ provides = provides.replace(prov, prov + "-nativesdk")
+ d.setVar("PROVIDES", provides)
+}
+
+addhandler nativesdk_virtclass_handler
+
+do_populate_sysroot[stamp-extra-info] = ""
+do_package[stamp-extra-info] = ""
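
The nativesdk mapping differs from the native one in its treatment of toolchain recipes. A standalone sketch of the rules in the anonymous python function above (hypothetical names, simplified):

    def to_nativesdk(depends):
        # -native and -cross deps pass through untouched; compiler pieces
        # move to -crosssdk; everything else gains a -nativesdk suffix
        newdeps = []
        for dep in depends.split():
            if dep.endswith("-native") or dep.endswith("-cross"):
                newdeps.append(dep)
            elif dep.endswith(("-gcc-intermediate", "-gcc-initial",
                               "-gcc", "-g++")):
                newdeps.append(dep + "-crosssdk")
            elif not dep.endswith("-nativesdk"):
                newdeps.append(dep + "-nativesdk")
            else:
                newdeps.append(dep)
        return " ".join(newdeps)

    print(to_nativesdk("virtual/i586-pokysdk-linux-gcc zlib quilt-native"))
    # -> virtual/i586-pokysdk-linux-gcc-crosssdk zlib-nativesdk quilt-native
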
diff --git a/meta/classes/openmoko-base.bbclass b/meta/classes/openmoko-base.bbclass
deleted file mode 100644
index 8643daa7a..000000000
--- a/meta/classes/openmoko-base.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-HOMEPAGE = "http://www.openmoko.org"
-LICENSE ?= "GPL"
-OPENMOKO_RELEASE ?= "OM-2007"
-OPENMOKO_MIRROR ?= "svn://svn.openmoko.org/trunk"
-
-def openmoko_base_get_subdir(d):
- import bb
- openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
- if section == 'base' or section == 'libs': return ""
- elif section in 'apps tools pim'.split(): return "applications"
- elif section == "panel-plugin": return "panel-plugins"
- elif section == "inputmethods": return "inputmethods"
- else: return section
-
-SUBDIR = "${@openmoko_base_get_subdir(d)}"
-
-SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
-S = "${WORKDIR}/${PN}"
-
-FILES_${PN} += "${datadir}/icons"
diff --git a/meta/classes/openmoko-panel-plugin.bbclass b/meta/classes/openmoko-panel-plugin.bbclass
deleted file mode 100644
index 6a22a92ac..000000000
--- a/meta/classes/openmoko-panel-plugin.bbclass
+++ /dev/null
@@ -1,6 +0,0 @@
-SECTION = "openmoko/panel-plugin"
-DEPENDS += "matchbox-panel-2 libmokopanelui2"
-
-inherit openmoko2
-
-FILES_${PN} = "${libdir}/matchbox-panel/lib*.so* ${datadir}"
diff --git a/meta/classes/openmoko.bbclass b/meta/classes/openmoko.bbclass
deleted file mode 100644
index 808ab8fcb..000000000
--- a/meta/classes/openmoko.bbclass
+++ /dev/null
@@ -1,3 +0,0 @@
-inherit openmoko-base autotools pkgconfig
-
-DEPENDS_prepend = "${@["openmoko-libs ", ""][(bb.data.getVar('PN', d, 1) == 'openmoko-libs')]}"
diff --git a/meta/classes/openmoko2.bbclass b/meta/classes/openmoko2.bbclass
deleted file mode 100644
index ef734e431..000000000
--- a/meta/classes/openmoko2.bbclass
+++ /dev/null
@@ -1,33 +0,0 @@
-inherit autotools pkgconfig
-
-HOMEPAGE = "http://www.openmoko.org"
-OPENMOKO_RELEASE ?= "OM-2007.2"
-OPENMOKO_MIRROR ?= "svn://svn.openmoko.org/trunk"
-
-def openmoko_two_get_license(d):
- import bb
- openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
- return "LGPL GPL".split()[section != "libs"]
-
-def openmoko_two_get_subdir(d):
- import bb
- openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
- if section == 'base': return ""
- elif section == 'libs': return "libraries"
- elif section in 'apps tools pim'.split(): return "applications"
- elif section == "panel-plugin": return "panel-plugins"
- elif section == "inputmethods": return "inputmethods"
- elif section == "daemons": return "daemons"
- elif section == "misc": return "misc"
- else: return section
-
-LICENSE = "${@openmoko_two_get_license(d)}"
-SUBDIR = "${@openmoko_two_get_subdir(d)}"
-
-SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${PN};proto=http"
-S = "${WORKDIR}/${PN}"
-
-FILES_${PN} += "${datadir}/icons"
-
-SVNREV = "r${SRCREV}"
-#SVNREV = "${SRCDATE}"
diff --git a/meta/classes/own-mirrors.bbclass b/meta/classes/own-mirrors.bbclass
new file mode 100644
index 000000000..8a6feaf4d
--- /dev/null
+++ b/meta/classes/own-mirrors.bbclass
@@ -0,0 +1,12 @@
+PREMIRRORS() {
+cvs://.*/.* ${SOURCE_MIRROR_URL}
+svn://.*/.* ${SOURCE_MIRROR_URL}
+git://.*/.* ${SOURCE_MIRROR_URL}
+hg://.*/.* ${SOURCE_MIRROR_URL}
+bzr://.*/.* ${SOURCE_MIRROR_URL}
+svk://.*/.* ${SOURCE_MIRROR_URL}
+p4://.*/.* ${SOURCE_MIRROR_URL}
+osc://.*/.* ${SOURCE_MIRROR_URL}
+https?$://.*/.* ${SOURCE_MIRROR_URL}
+ftp://.*/.* ${SOURCE_MIRROR_URL}
+}
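
Each PREMIRRORS line pairs a regular expression on the source URI with a mirror to consult first. A much simplified model of the lookup (BitBake's real uri_replace logic additionally rewrites URI components; the mirror URL here is a stand-in for SOURCE_MIRROR_URL):

    import re

    SOURCE_MIRROR_URL = "http://downloads.example.com/sources/"
    PREMIRRORS = [(r"git://.*/.*", SOURCE_MIRROR_URL),
                  (r"ftp://.*/.*", SOURCE_MIRROR_URL)]

    def premirror_candidates(uri):
        # Return mirrors whose pattern matches the original URI, in order
        return [mirror for pattern, mirror in PREMIRRORS
                if re.match(pattern, uri)]

    print(premirror_candidates("git://git.example.org/project/repo.git"))
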
diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index df870142f..6fba5b690 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -1,10 +1,55 @@
#
-# General packaging help functions
+# Packaging process
#
+# Executive summary: This class iterates over the functions listed in PACKAGEFUNCS,
+# taking D and splitting it up into the packages listed in PACKAGES, placing the
+# resulting output in PKGDEST.
+#
+# There are the following default steps but PACKAGEFUNCS can be extended:
+#
+# a) package_get_auto_pr - get PRAUTO from remote PR service
+#
+# b) perform_packagecopy - Copy D into PKGD
+#
+# c) package_do_split_locales - Split out the locale files, updates FILES and PACKAGES
+#
+# d) split_and_strip_files - split the files into runtime and debug and strip them.
+# Debug files include the split-out debug info and associated sources that end up in -dbg packages
+#
+# e) fixup_perms - Fix up permissions in the package before we split it.
+#
+# f) populate_packages - Split the files in PKGD into separate packages in PKGDEST/<pkgname>
+# Also triggers the binary stripping code to put files in -dbg packages.
+#
+# g) package_do_filedeps - Collect perfile run-time dependency metadata
+# The data is stored in FILER{PROVIDES,DEPENDS}_file_pkg variables with
+# a list of affected files in FILER{PROVIDES,DEPENDS}FLIST_pkg
+#
+# h) package_do_shlibs - Look at the shared libraries generated and automatically add any
+# dependencies found. Also stores the package name so anyone else using this library
+# knows which package to depend on.
+#
+# i) package_do_pkgconfig - Keep track of which packages need and provide which .pc files
+#
+# j) read_shlibdeps - Reads the stored shlibs information into the metadata
+#
+# k) package_depchains - Adds automatic dependencies to -dbg and -dev packages
+#
+# l) emit_pkgdata - saves the packaging data into PKGDATA_DIR for use in later
+# packaging steps
inherit packagedata
+inherit prserv
+
+PKGD = "${WORKDIR}/package"
+PKGDEST = "${WORKDIR}/packages-split"
+
+LOCALE_SECTION ?= ''
-PKGDEST = "${WORKDIR}/install"
+ALL_MULTILIB_PACKAGE_ARCHS = "${@all_multilib_tune_values(d, 'PACKAGE_ARCHS')}"
+
+# rpm is used for the per-file dependency identification
+PACKAGE_DEPENDS += "rpm-native"
def legitimize_package_name(s):
"""
@@ -28,14 +73,24 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
Used in .bb files to split up dynamically generated subpackages of a
given package, usually plugins or modules.
"""
- import os, os.path, bb
- dvar = bb.data.getVar('D', d, 1)
- if not dvar:
- bb.error("D not defined")
- return
+ ml = d.getVar("MLPREFIX", True)
+ if ml:
+ if not output_pattern.startswith(ml):
+ output_pattern = ml + output_pattern
+
+ newdeps = []
+ for dep in (extra_depends or "").split():
+ if dep.startswith(ml):
+ newdeps.append(dep)
+ else:
+ newdeps.append(ml + dep)
+ if newdeps:
+ extra_depends = " ".join(newdeps)
- packages = bb.data.getVar('PACKAGES', d, 1).split()
+ dvar = d.getVar('PKGD', True)
+
+ packages = d.getVar('PACKAGES', True).split()
if postinst:
postinst = '#!/bin/sh\n' + postinst + '\n'
@@ -52,16 +107,9 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
objs.append(relpath)
if extra_depends == None:
- # This is *really* broken
- mainpkg = packages[0]
- # At least try and patch it up I guess...
- if mainpkg.find('-dbg'):
- mainpkg = mainpkg.replace('-dbg', '')
- if mainpkg.find('-dev'):
- mainpkg = mainpkg.replace('-dev', '')
- extra_depends = mainpkg
+ extra_depends = d.getVar("PN", True)
- for o in objs:
+ for o in sorted(objs):
import re, stat
if match_path:
m = re.match(file_regex, o)
@@ -81,7 +129,7 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
packages = [pkg] + packages
else:
packages.append(pkg)
- oldfiles = bb.data.getVar('FILES_' + pkg, d, 1)
+ oldfiles = d.getVar('FILES_' + pkg, True)
if not oldfiles:
the_files = [os.path.join(root, o)]
if aux_files_pattern:
@@ -96,90 +144,164 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
the_files.append(fp % m.group(1))
else:
the_files.append(aux_files_pattern_verbatim % m.group(1))
- bb.data.setVar('FILES_' + pkg, " ".join(the_files), d)
+ d.setVar('FILES_' + pkg, " ".join(the_files))
if extra_depends != '':
- the_depends = bb.data.getVar('RDEPENDS_' + pkg, d, 1)
- if the_depends:
- the_depends = '%s %s' % (the_depends, extra_depends)
- else:
- the_depends = extra_depends
- bb.data.setVar('RDEPENDS_' + pkg, the_depends, d)
- bb.data.setVar('DESCRIPTION_' + pkg, description % on, d)
+ d.appendVar('RDEPENDS_' + pkg, ' ' + extra_depends)
+ d.setVar('DESCRIPTION_' + pkg, description % on)
if postinst:
- bb.data.setVar('pkg_postinst_' + pkg, postinst, d)
+ d.setVar('pkg_postinst_' + pkg, postinst)
if postrm:
- bb.data.setVar('pkg_postrm_' + pkg, postrm, d)
+ d.setVar('pkg_postrm_' + pkg, postrm)
else:
- bb.data.setVar('FILES_' + pkg, oldfiles + " " + os.path.join(root, o), d)
+ d.setVar('FILES_' + pkg, oldfiles + " " + os.path.join(root, o))
if callable(hook):
hook(f, pkg, file_regex, output_pattern, m.group(1))
- bb.data.setVar('PACKAGES', ' '.join(packages), d)
+ d.setVar('PACKAGES', ' '.join(packages))
PACKAGE_DEPENDS += "file-native"
python () {
- import bb
- if bb.data.getVar('PACKAGES', d, True) != '':
- deps = bb.data.getVarFlag('do_package', 'depends', d) or ""
- for dep in (bb.data.getVar('PACKAGE_DEPENDS', d, True) or "").split():
- deps += " %s:do_populate_staging" % dep
- bb.data.setVarFlag('do_package', 'depends', deps, d)
+ if d.getVar('PACKAGES', True) != '':
+ deps = ""
+ for dep in (d.getVar('PACKAGE_DEPENDS', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ d.appendVarFlag('do_package', 'depends', deps)
- deps = (bb.data.getVarFlag('do_package', 'deptask', d) or "").split()
# shlibs requires any DEPENDS to have already packaged for the *.list files
- deps.append("do_package")
- bb.data.setVarFlag('do_package', 'deptask', " ".join(deps), d)
+ d.appendVarFlag('do_package', 'deptask', " do_package")
+
+ elif not bb.data.inherits_class('image', d):
+ d.setVar("PACKAGERDEPTASK", "")
}
-def runstrip(file, d):
- # Function to strip a single file, called from populate_packages below
+def splitfile(file, debugfile, debugsrcdir, d):
+ # Function to split a single file, called from split_and_strip_files below
# A working 'file' (one which works on the target architecture)
- # is necessary for this stuff to work, hence the addition to do_package[depends]
+ # is split and the split-off portions go to debugfile.
+ #
+ # The debug information is then processed for src references. These
+ # references are copied to debugsrcdir, if defined.
- import bb, os, commands, stat
+ import commands, stat
- pathprefix = "export PATH=%s; " % bb.data.getVar('PATH', d, 1)
+ dvar = d.getVar('PKGD', True)
+ pathprefix = "export PATH=%s; " % d.getVar('PATH', True)
+ objcopy = d.getVar("OBJCOPY", True)
+ debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+ workdir = d.getVar("WORKDIR", True)
+ workparentdir = os.path.dirname(workdir)
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
- ret, result = commands.getstatusoutput("%sfile '%s'" % (pathprefix, file))
+ # We ignore kernel modules; we don't generate debug info files for them.
+ if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
+ return 1
- if ret:
- bb.error("runstrip: 'file %s' failed (forced strip)" % file)
+ newmode = None
+ if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
+ origmode = os.stat(file)[stat.ST_MODE]
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+ os.chmod(file, newmode)
- if "not stripped" not in result:
- bb.debug(1, "runstrip: skip %s" % file)
- return 0
+ # We need to extract the debug src information here...
+ if debugsrcdir:
+ os.system("%s'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (pathprefix, debugedit, workparentdir, debugsrcdir, sourcefile, file))
- # If the file is in a .debug directory it was already stripped,
- # don't do it again...
- if os.path.dirname(file).endswith(".debug"):
- bb.note("Already ran strip")
- return 0
+ bb.mkdirhier(os.path.dirname(debugfile))
- strip = bb.data.getVar("STRIP", d, 1)
- objcopy = bb.data.getVar("OBJCOPY", d, 1)
+ os.system("%s'%s' --only-keep-debug '%s' '%s'" % (pathprefix, objcopy, file, debugfile))
+
+ # Set the debuglink to have the view of the file path on the target
+ os.system("%s'%s' --add-gnu-debuglink='%s' '%s'" % (pathprefix, objcopy, debugfile, file))
+
+ if newmode:
+ os.chmod(file, origmode)
+
+ return 0
+
+def splitfile2(debugsrcdir, d):
+ # Function to process the debug src information, called from split_and_strip_files below
+ #
+ # The debug src information gathered by splitfile is further processed
+ # and copied to the destination here.
+
+ import commands, stat
+
+ sourcefile = d.expand("${WORKDIR}/debugsources.list")
+ if debugsrcdir and os.path.isfile(sourcefile):
+ dvar = d.getVar('PKGD', True)
+ pathprefix = "export PATH=%s; " % d.getVar('PATH', True)
+ strip = d.getVar("STRIP", True)
+ objcopy = d.getVar("OBJCOPY", True)
+ debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
+ workdir = d.getVar("WORKDIR", True)
+ workparentdir = os.path.dirname(workdir)
+ workbasedir = os.path.basename(workdir)
+
+ nosuchdir = []
+ basepath = dvar
+ for p in debugsrcdir.split("/"):
+ basepath = basepath + "/" + p
+ if not os.path.exists(basepath):
+ nosuchdir.append(basepath)
+ bb.mkdirhier(basepath)
+
+ processdebugsrc = "LC_ALL=C ; sort -z -u '%s' | egrep -v -z '(<internal>|<built-in>)$' | "
+ # We need to ignore files that are not actually ours
+ # we do this by only paying attention to items from this package
+ processdebugsrc += "fgrep -z '%s' | "
+ processdebugsrc += "(cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)"
+
+ os.system(processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir))
+
+ # The copy by cpio may have resulted in some empty directories! Remove these
+ for root, dirs, files in os.walk("%s%s" % (dvar, debugsrcdir)):
+ for d in dirs:
+ dir = os.path.join(root, d)
+ #bb.note("rmdir -p %s" % dir)
+ os.system("rmdir -p %s 2>/dev/null" % dir)
+
+ # Also remove debugsrcdir if it's empty
+ for p in nosuchdir[::-1]:
+ if os.path.exists(p) and not os.listdir(p):
+ os.rmdir(p)
+
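
The objcopy calls in splitfile, combined with the strip in runstrip below, follow the usual GNU debuglink sequence. Reduced to its essentials (a sketch with placeholder paths; objcopy and strip would be the cross tools named by OBJCOPY and STRIP):

    import subprocess

    def split_debug(objcopy, strip, binary, debugfile):
        # Keep only the debug sections in the .debug companion file
        subprocess.check_call([objcopy, "--only-keep-debug", binary, debugfile])
        # Strip the original binary
        subprocess.check_call([strip, "--remove-section=.comment",
                               "--remove-section=.note", binary])
        # Record in the binary where its debug info now lives
        subprocess.check_call([objcopy, "--add-gnu-debuglink=" + debugfile,
                               binary])
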
+def runstrip(file, elftype, d):
+ # Function to strip a single file, called from split_and_strip_files below
+ # A working 'strip' (one which works on the target architecture) is required.
+ #
+ # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+ # us what type of file we're processing...
+ # 4 - executable
+ # 8 - shared library
+
+ import commands, stat
+
+ pathprefix = "export PATH=%s; " % d.getVar('PATH', True)
+ strip = d.getVar("STRIP", True)
+
+ # Handle kernel modules specifically - .debug directories here are pointless
+ if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
+ return os.system("%s'%s' --strip-debug --remove-section=.comment --remove-section=.note --preserve-dates '%s'" % (pathprefix, strip, file))
newmode = None
- if not os.access(file, os.W_OK):
+ if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
origmode = os.stat(file)[stat.ST_MODE]
- newmode = origmode | stat.S_IWRITE
+ newmode = origmode | stat.S_IWRITE | stat.S_IREAD
os.chmod(file, newmode)
extraflags = ""
- if ".so" in file and "shared" in result:
+ # .so and shared library
+ if ".so" in file and elftype & 8:
extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
- elif "shared" in result or "executable" in result:
+ # shared or executable:
+ elif elftype & 8 or elftype & 4:
extraflags = "--remove-section=.comment --remove-section=.note"
- bb.mkdirhier(os.path.join(os.path.dirname(file), ".debug"))
- debugfile=os.path.join(os.path.dirname(file), ".debug", os.path.basename(file))
-
stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
bb.debug(1, "runstrip: %s" % stripcmd)
- os.system("%s'%s' --only-keep-debug '%s' '%s'" % (pathprefix, objcopy, file, debugfile))
ret = os.system("%s%s" % (pathprefix, stripcmd))
- os.system("%s'%s' --add-gnu-debuglink='%s' '%s'" % (pathprefix, objcopy, debugfile, file))
if newmode:
os.chmod(file, origmode)
@@ -187,16 +309,16 @@ def runstrip(file, d):
if ret:
bb.error("runstrip: '%s' strip command failed" % stripcmd)
- return 1
+ return 0
#
# Package data handling routines
#
def get_package_mapping (pkg, d):
- import bb, os
+ import oe.packagedata
- data = read_subpkgdata(pkg, d)
+ data = oe.packagedata.read_subpkgdata(pkg, d)
key = "PKG_%s" % pkg
if key in data:
@@ -205,51 +327,64 @@ def get_package_mapping (pkg, d):
return pkg
def runtime_mapping_rename (varname, d):
- import bb, os
-
- #bb.note("%s before: %s" % (varname, bb.data.getVar(varname, d, 1)))
+ #bb.note("%s before: %s" % (varname, d.getVar(varname, True)))
new_depends = []
- for depend in bb.utils.explode_deps(bb.data.getVar(varname, d, 1) or ""):
+ deps = bb.utils.explode_dep_versions(d.getVar(varname, True) or "")
+ for depend in deps:
# Have to be careful with any version component of the depend
- split_depend = depend.split(' (')
- new_depend = get_package_mapping(split_depend[0].strip(), d)
- if len(split_depend) > 1:
- new_depends.append("%s (%s" % (new_depend, split_depend[1]))
+ new_depend = get_package_mapping(depend, d)
+ if deps[depend]:
+ new_depends.append("%s (%s)" % (new_depend, deps[depend]))
else:
new_depends.append(new_depend)
- bb.data.setVar(varname, " ".join(new_depends) or None, d)
+ d.setVar(varname, " ".join(new_depends) or None)
- #bb.note("%s after: %s" % (varname, bb.data.getVar(varname, d, 1)))
+ #bb.note("%s after: %s" % (varname, d.getVar(varname, True)))
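
runtime_mapping_rename above now round-trips its variable through bb.utils.explode_dep_versions and bb.utils.join_deps so that version constraints survive the rename. The shape of the intermediate data, approximated by hand (not BitBake's implementation):

    import re

    def explode_dep_versions(depends):
        # "a (>= 1.0) b" -> {"a": ">= 1.0", "b": ""}
        return {name: ver for name, ver in
                re.findall(r"(\S+)(?:\s+\(([^)]*)\))?", depends)}

    print(explode_dep_versions("update-rc.d (>= 0.7) initscripts"))
    # -> {'update-rc.d': '>= 0.7', 'initscripts': ''}
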
#
# Package functions suitable for inclusion in PACKAGEFUNCS
#
-python package_do_split_locales() {
- import os
+python package_get_auto_pr() {
+ # per recipe PRSERV_HOST PRSERV_PORT
+ pn = d.getVar('PN', True)
+ host = d.getVar("PRSERV_HOST_" + pn, True)
+ port = d.getVar("PRSERV_PORT_" + pn, True)
+ if not (host is None):
+ d.setVar("PRSERV_HOST", host)
+ if not (port is None):
+ d.setVar("PRSERV_PORT", port)
+ if d.getVar('USE_PR_SERV', True) != "0":
+ try:
+ auto_pr=prserv_get_pr_auto(d)
+ except Exception as e:
+ bb.fatal("Can NOT get PRAUTO, exception %s" % str(e))
+ return
+ if auto_pr is None:
+ if d.getVar('PRSERV_LOCKDOWN', True):
+ bb.fatal("Can NOT get PRAUTO from lockdown exported file")
+ else:
+ bb.fatal("Can NOT get PRAUTO from remote PR service")
+ return
+ d.setVar('PRAUTO',str(auto_pr))
+}
- if (bb.data.getVar('PACKAGE_NO_LOCALE', d, 1) == '1'):
+python package_do_split_locales() {
+ if (d.getVar('PACKAGE_NO_LOCALE', True) == '1'):
bb.debug(1, "package requested not splitting locales")
return
- packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+ packages = (d.getVar('PACKAGES', True) or "").split()
- datadir = bb.data.getVar('datadir', d, 1)
+ datadir = d.getVar('datadir', True)
if not datadir:
bb.note("datadir not defined")
return
- dvar = bb.data.getVar('D', d, 1)
- if not dvar:
- bb.error("D not defined")
- return
-
- pn = bb.data.getVar('PN', d, 1)
- if not pn:
- bb.error("PN not defined")
- return
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('PN', True)
if pn + '-locale' in packages:
packages.remove(pn + '-locale')
@@ -262,24 +397,23 @@ python package_do_split_locales() {
locales = os.listdir(localedir)
- # This is *really* broken
- mainpkg = packages[0]
- # At least try and patch it up I guess...
- if mainpkg.find('-dbg'):
- mainpkg = mainpkg.replace('-dbg', '')
- if mainpkg.find('-dev'):
- mainpkg = mainpkg.replace('-dev', '')
-
- for l in locales:
+ summary = d.getVar('SUMMARY', True) or pn
+ description = d.getVar('DESCRIPTION', True) or ""
+ locale_section = d.getVar('LOCALE_SECTION', True)
+ mlprefix = d.getVar('MLPREFIX', True) or ""
+ for l in sorted(locales):
ln = legitimize_package_name(l)
pkg = pn + '-locale-' + ln
packages.append(pkg)
- bb.data.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l), d)
- bb.data.setVar('RDEPENDS_' + pkg, '%s virtual-locale-%s' % (mainpkg, ln), d)
- bb.data.setVar('RPROVIDES_' + pkg, '%s-locale %s-translation' % (pn, ln), d)
- bb.data.setVar('DESCRIPTION_' + pkg, '%s translation for %s' % (l, pn), d)
+ d.setVar('FILES_' + pkg, os.path.join(datadir, 'locale', l))
+ d.setVar('RDEPENDS_' + pkg, '%s %svirtual-locale-%s' % (pn, mlprefix, ln))
+ d.setVar('RPROVIDES_' + pkg, '%s-locale %s%s-translation' % (pn, mlprefix, ln))
+ d.setVar('SUMMARY_' + pkg, '%s - %s translations' % (summary, l))
+ d.setVar('DESCRIPTION_' + pkg, '%s This package contains language translation files for the %s locale.' % (description, l))
+ if locale_section:
+ d.setVar('SECTION_' + pkg, locale_section)
- bb.data.setVar('PACKAGES', ' '.join(packages), d)
+ d.setVar('PACKAGES', ' '.join(packages))
# Disabled by RP 18/06/07
# Wildcards aren't supported in debian
@@ -287,68 +421,481 @@ python package_do_split_locales() {
# glibc-localedata-translit* won't install as a dependency
# for some other package which breaks meta-toolchain
# Probably breaks since virtual-locale- isn't provided anywhere
- #rdep = (bb.data.getVar('RDEPENDS_%s' % mainpkg, d, 1) or bb.data.getVar('RDEPENDS', d, 1) or "").split()
+ #rdep = (d.getVar('RDEPENDS_%s' % pn, True) or d.getVar('RDEPENDS', True) or "").split()
#rdep.append('%s-locale*' % pn)
- #bb.data.setVar('RDEPENDS_%s' % mainpkg, ' '.join(rdep), d)
+ #d.setVar('RDEPENDS_%s' % pn, ' '.join(rdep))
}
-python populate_packages () {
- import glob, stat, errno, re
+python perform_packagecopy () {
+ dest = d.getVar('D', True)
+ dvar = d.getVar('PKGD', True)
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
+ bb.mkdirhier(dvar)
- import os # path manipulations
- outdir = bb.data.getVar('DEPLOY_DIR', d, 1)
- if not outdir:
- bb.error("DEPLOY_DIR not defined, unable to package")
- return
- bb.mkdirhier(outdir)
+ # Start package population by taking a copy of the installed
+ # files to operate on
+ os.system('rm -rf %s/*' % (dvar))
+ # Preserve sparse files and hard links
+ os.system('tar -cf - -C %s -ps . | tar -xf - -C %s' % (dest, dvar))
+}
- dvar = bb.data.getVar('D', d, 1)
- if not dvar:
- bb.error("D not defined, unable to package")
- return
- bb.mkdirhier(dvar)
+# We generate a master list of directories to process; we start by
+# seeding this list with reasonable defaults, then load from
+# the fs-perms.txt files
+python fixup_perms () {
+ import os, pwd, grp
- packages = bb.data.getVar('PACKAGES', d, 1)
+ # init using a string with the same format as a line documented in
+ # the fs-perms.txt file
+ # <path> <mode> <uid> <gid> <walk> <fmode> <fuid> <fgid>
+ # <path> link <link target>
+ #
+ # __str__ can be used to print out an entry in the input format
+ #
+ # if fs_perms_entry.path is None:
+ # an error occurred
+ # if fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.link = target of link
+ # if not fs_perms_entry.link, you can retrieve:
+ # fs_perms_entry.path = path
+ # fs_perms_entry.mode = expected dir mode or None
+ # fs_perms_entry.uid = expected uid or -1
+ # fs_perms_entry.gid = expected gid or -1
+ # fs_perms_entry.walk = 'true' or something else
+ # fs_perms_entry.fmode = expected file mode or None
+ # fs_perms_entry.fuid = expected file uid or -1
+ # fs_perms_entry.fgid = expected file gid or -1
+ class fs_perms_entry():
+ def __init__(self, line):
+ lsplit = line.split()
+ if len(lsplit) == 3 and lsplit[1].lower() == "link":
+ self._setlink(lsplit[0], lsplit[2])
+ elif len(lsplit) == 8:
+ self._setdir(lsplit[0], lsplit[1], lsplit[2], lsplit[3], lsplit[4], lsplit[5], lsplit[6], lsplit[7])
+ else:
+ bb.error("Fixup Perms: invalid config line %s" % line)
+ self.path = None
+ self.link = None
- pn = bb.data.getVar('PN', d, 1)
- if not pn:
- bb.error("PN not defined")
- return
+ def _setdir(self, path, mode, uid, gid, walk, fmode, fuid, fgid):
+ self.path = os.path.normpath(path)
+ self.link = None
+ self.mode = self._procmode(mode)
+ self.uid = self._procuid(uid)
+ self.gid = self._procgid(gid)
+ self.walk = walk.lower()
+ self.fmode = self._procmode(fmode)
+ self.fuid = self._procuid(fuid)
+ self.fgid = self._procgid(fgid)
- os.chdir(dvar)
+ def _setlink(self, path, link):
+ self.path = os.path.normpath(path)
+ self.link = link
- def isexec(path):
- try:
- s = os.stat(path)
- except (os.error, AttributeError):
- return 0
- return (s[stat.ST_MODE] & stat.S_IEXEC)
+ def _procmode(self, mode):
+ if not mode or (mode and mode == "-"):
+ return None
+ else:
+ return int(mode,8)
- # Sanity check PACKAGES for duplicates - should be moved to
- # sanity.bbclass once we have the infrastucture
- package_list = []
- for pkg in packages.split():
- if pkg in package_list:
- bb.error("-------------------")
- bb.error("%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg)
- bb.error("Please fix the metadata/report this as bug to OE bugtracker.")
- bb.error("-------------------")
+ # Note uid/gid -1 has special significance in os.lchown
+ def _procuid(self, uid):
+ if uid is None or uid == "-":
+ return -1
+ elif uid.isdigit():
+ return int(uid)
+ else:
+ return pwd.getpwnam(uid).pw_uid
+
+ def _procgid(self, gid):
+ if gid is None or gid == "-":
+ return -1
+ elif gid.isdigit():
+ return int(gid)
+ else:
+ return grp.getgrnam(gid).gr_gid
+
+ # Use for debugging the entries
+ def __str__(self):
+ if self.link:
+ return "%s link %s" % (self.path, self.link)
+ else:
+ mode = "-"
+ if self.mode:
+ mode = "0%o" % self.mode
+ fmode = "-"
+ if self.fmode:
+ fmode = "0%o" % self.fmode
+ uid = self._mapugid(self.uid)
+ gid = self._mapugid(self.gid)
+ fuid = self._mapugid(self.fuid)
+ fgid = self._mapugid(self.fgid)
+ return "%s %s %s %s %s %s %s %s" % (self.path, mode, uid, gid, self.walk, fmode, fuid, fgid)
+
+ def _mapugid(self, id):
+ if id is None or id == -1:
+ return "-"
+ else:
+ return "%d" % id
+
+ # Fix the permission, owner and group of path
+ def fix_perms(path, mode, uid, gid, dir):
+ if mode and not os.path.islink(path):
+ #bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
+ os.chmod(path, mode)
+ # -1 is a special value that means don't change the uid/gid
+ # if they are BOTH -1, don't bother to lchown
+ if not (uid == -1 and gid == -1):
+ #bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
+ os.lchown(path, uid, gid)
+
+ # Return a list of configuration files based on either the default
+ # files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES;
+ # paths are resolved via BBPATH
+ def get_fs_perms_list(d):
+ str = ""
+ fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES', True)
+ if not fs_perms_tables:
+ fs_perms_tables = 'files/fs-perms.txt'
+ for conf_file in fs_perms_tables.split():
+ str += " %s" % bb.which(d.getVar('BBPATH', True), conf_file)
+ return str
+
+
+
+ dvar = d.getVar('PKGD', True)
+
+ fs_perms_table = {}
+
+ # By default all of the standard directories specified in
+ # bitbake.conf will get 0755 root:root.
+ target_path_vars = [ 'base_prefix',
+ 'prefix',
+ 'exec_prefix',
+ 'base_bindir',
+ 'base_sbindir',
+ 'base_libdir',
+ 'datadir',
+ 'sysconfdir',
+ 'servicedir',
+ 'sharedstatedir',
+ 'localstatedir',
+ 'infodir',
+ 'mandir',
+ 'docdir',
+ 'bindir',
+ 'sbindir',
+ 'libexecdir',
+ 'libdir',
+ 'includedir',
+ 'oldincludedir' ]
+
+ for path in target_path_vars:
+ dir = d.getVar(path, True) or ""
+ if dir == "":
+ continue
+ fs_perms_table[dir] = fs_perms_entry(bb.data.expand("%s 0755 root root false - - -" % (dir), d))
+
+ # Now we actually load from the configuration files
+ for conf in get_fs_perms_list(d).split():
+ if os.path.exists(conf):
+ f = open(conf)
+ for line in f:
+ if line.startswith('#'):
+ continue
+ lsplit = line.split()
+ if len(lsplit) == 0:
+ continue
+ if len(lsplit) != 8 and not (len(lsplit) == 3 and lsplit[1].lower() == "link"):
+ bb.error("Fixup perms: %s invalid line: %s" % (conf, line))
+ continue
+ entry = fs_perms_entry(d.expand(line))
+ if entry and entry.path:
+ fs_perms_table[entry.path] = entry
+ f.close()
+
+ # Debug -- list out in-memory table
+ #for dir in fs_perms_table:
+ # bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+
+ # We process links first, so we can go back and fixup directory ownership
+ # for any newly created directories
+ for dir in fs_perms_table:
+ if not fs_perms_table[dir].link:
+ continue
+
+ origin = dvar + dir
+ if not (os.path.exists(origin) and os.path.isdir(origin) and not os.path.islink(origin)):
+ continue
+
+ link = fs_perms_table[dir].link
+ if link[0] == "/":
+ target = dvar + link
+ ptarget = link
else:
- package_list.append(pkg)
+ target = os.path.join(os.path.dirname(origin), link)
+ ptarget = os.path.join(os.path.dirname(dir), link)
+ if os.path.exists(target):
+ bb.error("Fixup Perms: Unable to correct directory link, target already exists: %s -> %s" % (dir, ptarget))
+ continue
+
+ # Create path to move directory to, move it, and then setup the symlink
+ bb.mkdirhier(os.path.dirname(target))
+ #bb.note("Fixup Perms: Rename %s -> %s" % (dir, ptarget))
+ os.rename(origin, target)
+ #bb.note("Fixup Perms: Link %s -> %s" % (dir, link))
+ os.symlink(link, origin)
+
+ for dir in fs_perms_table:
+ if fs_perms_table[dir].link:
+ continue
+
+ origin = dvar + dir
+ if not (os.path.exists(origin) and os.path.isdir(origin)):
+ continue
+
+ fix_perms(origin, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+
+ if fs_perms_table[dir].walk == 'true':
+ for root, dirs, files in os.walk(origin):
+ for dr in dirs:
+ each_dir = os.path.join(root, dr)
+ fix_perms(each_dir, fs_perms_table[dir].mode, fs_perms_table[dir].uid, fs_perms_table[dir].gid, dir)
+ for f in files:
+ each_file = os.path.join(root, f)
+ fix_perms(each_file, fs_perms_table[dir].fmode, fs_perms_table[dir].fuid, fs_perms_table[dir].fgid, dir)
+}
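
A fs-perms.txt entry is just eight whitespace-separated fields (or the three-field link form). Pulling one apart by hand shows the mapping onto fs_perms_entry (made-up example path):

    line = "/usr/share/man 0755 root root true 0644 root root"
    path, mode, uid, gid, walk, fmode, fuid, fgid = line.split()
    # mode/fmode are octal or "-"; uid/gid are names, numbers or "-";
    # walk == "true" applies fmode/fuid/fgid to files found underneath
    print(path, int(mode, 8), walk == "true", int(fmode, 8))
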
+
+python split_and_strip_files () {
+ import commands, stat, errno
+
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('PN', True)
+
+ # We default to '.debug' style
+ if d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory':
+ # Single debug-file-directory style debug info
+ debugappend = ".debug"
+ debugdir = ""
+ debuglibdir = "/usr/lib/debug"
+ debugsrcdir = "/usr/src/debug"
+ else:
+ # Original OE-core, a.k.a. ".debug", style debug info
+ debugappend = ""
+ debugdir = "/.debug"
+ debuglibdir = ""
+ debugsrcdir = "/usr/src/debug"
+
+ os.chdir(dvar)
- if (bb.data.getVar('INHIBIT_PACKAGE_STRIP', d, 1) != '1'):
+ # Return type (bits):
+ # 0 - not elf
+ # 1 - ELF
+ # 2 - stripped
+ # 4 - executable
+ # 8 - shared library
+ def isELF(path):
+ type = 0
+ pathprefix = "export PATH=%s; " % d.getVar('PATH', True)
+ ret, result = commands.getstatusoutput("%sfile '%s'" % (pathprefix, path))
+
+ if ret:
+ bb.error("split_and_strip_files: 'file %s' failed" % path)
+ return type
+
+ # Not stripped
+ if "ELF" in result:
+ type |= 1
+ if "not stripped" not in result:
+ type |= 2
+ if "executable" in result:
+ type |= 4
+ if "shared" in result:
+ type |= 8
+ return type
+
+
+ #
+ # First let's figure out all of the files we may have to process ... do this only once!
+ #
+ file_list = {}
+ file_links = {}
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1') and \
+ (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
for root, dirs, files in os.walk(dvar):
for f in files:
file = os.path.join(root, f)
- if not os.path.islink(file) and not os.path.isdir(file) and isexec(file):
- runstrip(file, d)
+ # Only process files (and symlinks)... Skip files that are obviously debug files
+ if not (debugappend != "" and file.endswith(debugappend)) and \
+ not (debugdir != "" and debugdir in os.path.dirname(file[len(dvar):])) and \
+ os.path.isfile(file):
+ try:
+ s = os.stat(file)
+ except OSError, (err, strerror):
+ if err != errno.ENOENT:
+ raise
+ # Skip broken symlinks
+ continue
+ # Is the item executable? Then we need to process it.
+ if (s[stat.ST_MODE] & stat.S_IXUSR) or \
+ (s[stat.ST_MODE] & stat.S_IXGRP) or \
+ (s[stat.ST_MODE] & stat.S_IXOTH):
+ # If it's a symlink, and points to an ELF file, we capture the readlink target
+ if os.path.islink(file):
+ target = os.readlink(file)
+ if not os.path.isabs(target):
+ ltarget = os.path.join(os.path.dirname(file), target)
+ else:
+ ltarget = target
+
+ if isELF(ltarget):
+ #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
+ file_list[file] = "sym: " + target
+ continue
+ # It's a file (or hardlink), not a link
+ # ...but is it ELF, and is it already stripped?
+ elf_file = isELF(file)
+ if elf_file & 1:
+ # Check if it's a hard link to something else
+ if s.st_nlink > 1:
+ file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+ # Hard link to something else
+ file_list[file] = "hard: " + file_reference
+ continue
+
+ file_list[file] = "ELF: %d" % elf_file
+
+
+ #
+ # First let's process debug splitting
+ #
+ if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT', True) != '1'):
+ for file in file_list:
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ fpath = dvar + dest
+ # Preserve symlinks in debug area...
+ if file_list[file].startswith("sym: "):
+ ltarget = file_list[file][5:]
+ lpath = os.path.dirname(ltarget)
+ lbase = os.path.basename(ltarget)
+ ftarget = ""
+ if lpath and lpath != ".":
+ ftarget += lpath + debugdir + "/"
+ ftarget += lbase + debugappend
+ if lpath.startswith(".."):
+ ftarget = os.path.join("..", ftarget)
+ bb.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Symlink %s -> %s" % (fpath, ftarget))
+ os.symlink(ftarget, fpath)
+ continue
+
+ # Preserve hard links in debug area...
+ file_reference = ""
+ if file_list[file].startswith("hard: "):
+ file_reference = file_list[file][6:]
+ if file_reference not in file_links:
+ # If this is a new file, add it as a reference, and
+ # update its type, so we can fall through and split
+ file_list[file] = "ELF: %d" % (isELF(file))
+ else:
+ target = file_links[file_reference][len(dvar):]
+ ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
+ bb.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Link %s -> %s" % (fpath, ftarget))
+ os.link(ftarget, fpath)
+ continue
+
+ # It's ELF...
+ if file_list[file].startswith("ELF: "):
+ elf_file = int(file_list[file][5:])
+ if elf_file & 2:
+ bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (src, pn))
+ continue
+
+ # Split the file...
+ bb.mkdirhier(os.path.dirname(fpath))
+ #bb.note("Split %s -> %s" % (file, fpath))
+ # Only store off the hard link reference if we successfully split!
+ if splitfile(file, fpath, debugsrcdir, d) == 0 and file_reference != "":
+ file_links[file_reference] = file
+
+ # The above may have generated dangling symlinks; remove them!
+ # Dangling symlinks are a result of something NOT being split, such as a stripped binary.
+ # This should be a rare occurrence, but we want to clean up anyway.
+ for file in file_list:
+ if file_list[file].startswith("sym: "):
+ src = file[len(dvar):]
+ dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+ fpath = dvar + dest
+ try:
+ s = os.stat(fpath)
+ except OSError, (err, strerror):
+ if err != errno.ENOENT:
+ raise
+ #bb.note("Remove dangling link %s -> %s" % (fpath, os.readlink(fpath)))
+ os.unlink(fpath)
+ # This could leave an empty debug directory lying around;
+ # take care of the obvious case...
+ os.system("rmdir %s 2>/dev/null" % os.path.dirname(fpath))
+
+ # Process the debugsrcdir if requested...
+ # This copies and places the referenced sources for later debugging...
+ splitfile2(debugsrcdir, d)
+ #
+ # End of debug splitting
+ #
+
+ #
+ # Now let's go back over things and strip them
+ #
+ if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
+ for file in file_list:
+ if file_list[file].startswith("ELF: "):
+ elf_file = int(file_list[file][5:])
+ #bb.note("Strip %s" % file)
+ runstrip(file, elf_file, d)
+
+
+ if (d.getVar('INHIBIT_PACKAGE_STRIP', True) != '1'):
+ for root, dirs, files in os.walk(dvar):
+ for f in files:
+ if not f.endswith(".ko"):
+ continue
+ runstrip(os.path.join(root, f), None, d)
+ #
+ # End of strip
+ #
+}
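
The elftype value threaded from isELF into splitfile and runstrip is a small bit pattern; decoding it standalone (an illustration of the flags documented above):

    ELF, STRIPPED, EXECUTABLE, SHARED = 1, 2, 4, 8

    def describe(elftype):
        parts = []
        if elftype & ELF:        parts.append("ELF")
        if elftype & STRIPPED:   parts.append("already stripped")
        if elftype & EXECUTABLE: parts.append("executable")
        if elftype & SHARED:     parts.append("shared library")
        return ", ".join(parts) or "not ELF"

    print(describe(1 | 4))  # unstripped executable: gets split and stripped
    print(describe(1 | 2))  # already stripped: triggers the warning above
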
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
+python populate_packages () {
+ import glob, stat, errno, re
+
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('DEPLOY_DIR', True)
+ dvar = d.getVar('PKGD', True)
+ packages = d.getVar('PACKAGES', True)
+ pn = d.getVar('PN', True)
+
+ bb.mkdirhier(outdir)
+ os.chdir(dvar)
+
+ # Sanity check PACKAGES for duplicates and for LICENSE_EXCLUSION
+ # This check should be moved to sanity.bbclass once we have the infrastructure
+ package_list = []
+
+ for pkg in packages.split():
+ if d.getVar('LICENSE_EXCLUSION-' + pkg, True):
+ bb.warn("%s has an incompatible license. Excluding from packaging." % pkg)
+ elif pkg in package_list:
+ bb.error("%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg)
+ else:
+ package_list.append(pkg)
+ d.setVar('PACKAGES', ' '.join(package_list))
+ pkgdest = d.getVar('PKGDEST', True)
os.system('rm -rf %s' % pkgdest)
seen = []
@@ -358,15 +905,16 @@ python populate_packages () {
root = os.path.join(pkgdest, pkg)
bb.mkdirhier(root)
- bb.data.setVar('PKG', pkg, localdata)
- overrides = bb.data.getVar('OVERRIDES', localdata, 1)
+ localdata.setVar('PKG', pkg)
+ overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
- bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+ localdata.setVar('OVERRIDES', overrides + ':' + pkg)
bb.data.update_data(localdata)
- filesvar = bb.data.getVar('FILES', localdata, 1) or ""
+ filesvar = localdata.getVar('FILES', True) or ""
files = filesvar.split()
+ file_links = {}
for file in files:
if os.path.isabs(file):
file = '.' + file
@@ -386,37 +934,66 @@ python populate_packages () {
if file in seen:
continue
seen.append(file)
+
+ def mkdir(src, dest, p):
+ src = os.path.join(src, p)
+ dest = os.path.join(dest, p)
+ bb.mkdirhier(dest)
+ fstat = os.stat(src)
+ os.chmod(dest, fstat.st_mode)
+ os.chown(dest, fstat.st_uid, fstat.st_gid)
+ if p not in seen:
+ seen.append(p)
+
+ def mkdir_recurse(src, dest, paths):
+ while paths.startswith("./"):
+ paths = paths[2:]
+ p = "."
+ for c in paths.split("/"):
+ p = os.path.join(p, c)
+ if not os.path.exists(os.path.join(dest, p)):
+ mkdir(src, dest, p)
+
if os.path.isdir(file) and not os.path.islink(file):
- bb.mkdirhier(os.path.join(root,file))
- os.chmod(os.path.join(root,file), os.stat(file).st_mode)
+ mkdir_recurse(dvar, root, file)
continue
+
+ mkdir_recurse(dvar, root, os.path.dirname(file))
fpath = os.path.join(root,file)
- dpath = os.path.dirname(fpath)
- bb.mkdirhier(dpath)
+ if not os.path.islink(file):
+ os.link(file, fpath)
+ fstat = os.stat(file)
+ os.chmod(fpath, fstat.st_mode)
+ os.chown(fpath, fstat.st_uid, fstat.st_gid)
+ continue
ret = bb.copyfile(file, fpath)
if ret is False or ret == 0:
raise bb.build.FuncFailed("File population failed")
+
del localdata
os.chdir(workdir)
unshipped = []
for root, dirs, files in os.walk(dvar):
- for f in files:
- path = os.path.join(root[len(dvar):], f)
+ dir = root[len(dvar):]
+ if not dir:
+ dir = os.sep
+ for f in (files + dirs):
+ path = os.path.join(dir, f)
if ('.' + path) not in seen:
unshipped.append(path)
if unshipped != []:
- bb.note("the following files were installed but not shipped in any package:")
+ bb.warn("For recipe %s, the following files/directories were installed but not shipped in any package:" % pn)
for f in unshipped:
- bb.note(" " + f)
+ bb.warn(" " + f)
bb.build.exec_func("package_name_hook", d)
for pkg in package_list:
- pkgname = bb.data.getVar('PKG_%s' % pkg, d, 1)
+ pkgname = d.getVar('PKG_%s' % pkg, True)
if pkgname is None:
- bb.data.setVar('PKG_%s' % pkg, pkg, d)
+ d.setVar('PKG_%s' % pkg, pkg)
dangling_links = {}
pkg_files = {}
@@ -440,7 +1017,8 @@ python populate_packages () {
dangling_links[pkg].append(os.path.normpath(target))
for pkg in package_list:
- rdepends = bb.utils.explode_deps(bb.data.getVar('RDEPENDS_' + pkg, d, 1) or bb.data.getVar('RDEPENDS', d, 1) or "")
+ rdepends = bb.utils.explode_dep_versions(d.getVar('RDEPENDS_' + pkg, True) or d.getVar('RDEPENDS', True) or "")
+
for l in dangling_links[pkg]:
found = False
bb.debug(1, "%s contains dangling link %s" % (pkg, l))
@@ -451,15 +1029,17 @@ python populate_packages () {
bb.debug(1, "target found in %s" % p)
if p == pkg:
break
- if not p in rdepends:
- rdepends.append(p)
+ if p not in rdepends:
+ rdepends[p] = ""
break
if found == False:
bb.note("%s contains dangling symlink to %s" % (pkg, l))
- bb.data.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends), d)
+ d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
populate_packages[dirs] = "${D}"
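
populate_packages above now fills PKGDEST/<pkg> with hard links rather than copies, which is cheap and keeps ownership and permissions in sync with PKGD; only symlinks still go through bb.copyfile so the link itself is reproduced. The core of the move (a sketch with hypothetical absolute paths):

    import os

    def place_file(pkgd_file, pkgdest_file):
        # Hard-link regular files into the per-package tree instead of copying
        destdir = os.path.dirname(pkgdest_file)
        if not os.path.isdir(destdir):
            os.makedirs(destdir)
        if not os.path.islink(pkgd_file):
            os.link(pkgd_file, pkgdest_file)
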
+PKGDESTWORK = "${WORKDIR}/pkgdata"
+
python emit_pkgdata() {
from glob import glob
@@ -469,43 +1049,55 @@ python emit_pkgdata() {
c = codecs.getencoder("string_escape")
return c(str)[0]
- val = bb.data.getVar('%s_%s' % (var, pkg), d, 1)
+ val = d.getVar('%s_%s' % (var, pkg), True)
if val:
f.write('%s_%s: %s\n' % (var, pkg, encode(val)))
return
- val = bb.data.getVar('%s' % (var), d, 1)
+ val = d.getVar('%s' % (var), True)
if val:
f.write('%s: %s\n' % (var, encode(val)))
return
- packages = bb.data.getVar('PACKAGES', d, True)
- pkgdatadir = bb.data.getVar('PKGDATA_DIR', d, True)
+ def get_directory_size(dir):
+ if os.listdir(dir):
+ size = int(os.popen('du -sk %s' % dir).readlines()[0].split('\t')[0])
+ else:
+ size = 0
+ return size
+
+ packages = d.getVar('PACKAGES', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ pkgdatadir = d.getVar('PKGDESTWORK', True)
- pstageactive = bb.data.getVar('PSTAGING_ACTIVE', d, True)
- if pstageactive == "1":
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
+ # Take shared lock since we're only reading, not writing
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
- data_file = pkgdatadir + bb.data.expand("/${PN}" , d)
+ data_file = pkgdatadir + d.expand("/${PN}" )
f = open(data_file, 'w')
f.write("PACKAGES: %s\n" % packages)
f.close()
- package_stagefile(data_file, d)
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ workdir = d.getVar('WORKDIR', True)
for pkg in packages.split():
subdata_file = pkgdatadir + "/runtime/%s" % pkg
+
sf = open(subdata_file, 'w')
write_if_exists(sf, pkg, 'PN')
write_if_exists(sf, pkg, 'PV')
write_if_exists(sf, pkg, 'PR')
+ write_if_exists(sf, pkg, 'PKGV')
+ write_if_exists(sf, pkg, 'PKGR')
+ write_if_exists(sf, pkg, 'LICENSE')
write_if_exists(sf, pkg, 'DESCRIPTION')
+ write_if_exists(sf, pkg, 'SUMMARY')
write_if_exists(sf, pkg, 'RDEPENDS')
write_if_exists(sf, pkg, 'RPROVIDES')
write_if_exists(sf, pkg, 'RRECOMMENDS')
write_if_exists(sf, pkg, 'RSUGGESTS')
write_if_exists(sf, pkg, 'RREPLACES')
write_if_exists(sf, pkg, 'RCONFLICTS')
+ write_if_exists(sf, pkg, 'SECTION')
write_if_exists(sf, pkg, 'PKG')
write_if_exists(sf, pkg, 'ALLOW_EMPTY')
write_if_exists(sf, pkg, 'FILES')
@@ -513,26 +1105,31 @@ python emit_pkgdata() {
write_if_exists(sf, pkg, 'pkg_postrm')
write_if_exists(sf, pkg, 'pkg_preinst')
write_if_exists(sf, pkg, 'pkg_prerm')
+ write_if_exists(sf, pkg, 'FILERPROVIDESFLIST')
+ for dfile in (d.getVar('FILERPROVIDESFLIST_' + pkg, True) or "").split():
+ write_if_exists(sf, pkg, 'FILERPROVIDES_' + dfile)
+
+ write_if_exists(sf, pkg, 'FILERDEPENDSFLIST')
+ for dfile in (d.getVar('FILERDEPENDSFLIST_' + pkg, True) or "").split():
+ write_if_exists(sf, pkg, 'FILERDEPENDS_' + dfile)
+
+ sf.write('%s_%s: %s\n' % ('PKGSIZE', pkg, get_directory_size(pkgdest + "/%s" % pkg)))
sf.close()
- package_stagefile(subdata_file, d)
- #if pkgdatadir2:
- # bb.copyfile(subdata_file, pkgdatadir2 + "/runtime/%s" % pkg)
- allow_empty = bb.data.getVar('ALLOW_EMPTY_%s' % pkg, d, 1)
+ allow_empty = d.getVar('ALLOW_EMPTY_%s' % pkg, True)
if not allow_empty:
- allow_empty = bb.data.getVar('ALLOW_EMPTY', d, 1)
- root = "%s/install/%s" % (workdir, pkg)
+ allow_empty = d.getVar('ALLOW_EMPTY', True)
+ root = "%s/%s" % (pkgdest, pkg)
os.chdir(root)
g = glob('*')
if g or allow_empty == "1":
packagedfile = pkgdatadir + '/runtime/%s.packaged' % pkg
file(packagedfile, 'w').close()
- package_stagefile(packagedfile, d)
- if pstageactive == "1":
- bb.utils.unlockfile(lf)
+
+ bb.utils.unlockfile(lf)
}
-emit_pkgdata[dirs] = "${PKGDATA_DIR}/runtime"
+emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime"
ldconfig_postinst_fragment() {
if [ x"$D" = "x" ]; then
@@ -540,12 +1137,100 @@ if [ x"$D" = "x" ]; then
fi
}
+RPMDEPS = "${STAGING_LIBDIR_NATIVE}/rpm/bin/rpmdeps-oecore --macros ${STAGING_LIBDIR_NATIVE}/rpm/macros --define '_rpmfc_magic_path ${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc' --rpmpopt ${STAGING_LIBDIR_NATIVE}/rpm/rpmpopt"
+
+# Collect perfile run-time dependency metadata
+# Output:
+# FILERPROVIDESFLIST_pkg - list of all files w/ deps
+# FILERPROVIDES_filepath_pkg - per file dep
+#
+# FILERDEPENDSFLIST_pkg - list of all files w/ deps
+# FILERDEPENDS_filepath_pkg - per file dep
+
+python package_do_filedeps() {
+ import re
+
+ pkgdest = d.getVar('PKGDEST', True)
+ packages = d.getVar('PACKAGES', True)
+
+ rpmdeps = d.expand("${RPMDEPS}")
+ r = re.compile(r'[<>=]+ +[^ ]*')
+
+ # Quick routine to process the results of the rpmdeps call...
+ def process_deps(pipe, pkg, provides_files, requires_files):
+ provides = {}
+ requires = {}
+
+ for line in pipe:
+ f = line.split(" ", 1)[0].strip()
+ line = line.split(" ", 1)[1].strip()
+
+ if line.startswith("Requires:"):
+ i = requires
+ elif line.startswith("Provides:"):
+ i = provides
+ else:
+ continue
+
+ file = f.replace(pkgdest + "/" + pkg, "")
+ file = file.replace("@", "@at@")
+ file = file.replace(" ", "@space@")
+ file = file.replace("\t", "@tab@")
+ file = file.replace("[", "@openbrace@")
+ file = file.replace("]", "@closebrace@")
+ file = file.replace("_", "@underscore@")
+ value = line.split(":", 1)[1].strip()
+ value = r.sub(r'(\g<0>)', value)
+
+ if value.startswith("rpmlib("):
+ continue
+ if value == "python":
+ continue
+ if file not in i:
+ i[file] = []
+ i[file].append(value)
+
+ for file in provides:
+ provides_files.append(file)
+ key = "FILERPROVIDES_" + file + "_" + pkg
+ d.setVar(key, " ".join(provides[file]))
+
+ for file in requires:
+ requires_files.append(file)
+ key = "FILERDEPENDS_" + file + "_" + pkg
+ d.setVar(key, " ".join(requires[file]))
+
+ def chunks(files, n):
+ return [files[i:i+n] for i in range(0, len(files), n)]
+
+ # Determine dependencies
+ for pkg in packages.split():
+ if pkg.endswith('-dbg') or pkg.endswith('-doc') or pkg.find('-locale-') != -1 or pkg.find('-localedata-') != -1 or pkg.find('-gconv-') != -1 or pkg.find('-charmap-') != -1 or pkg.startswith('kernel-module-'):
+ continue
+
+ provides_files = []
+ requires_files = []
+ rpfiles = []
+ for root, dirs, files in os.walk(pkgdest + "/" + pkg):
+ for file in files:
+ rpfiles.append(os.path.join(root, file))
+
+ for files in chunks(rpfiles, 100):
+ dep_pipe = os.popen(rpmdeps + " " + " ".join(files))
+
+ process_deps(dep_pipe, pkg, provides_files, requires_files)
+
+ d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files))
+ d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files))
+}
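
package_do_filedeps stores one variable per file, so the file path has to be made safe for use inside a variable name; that is what the @...@ escapes in process_deps do. A standalone rendition (assuming, as the parser above does, that rpmdeps emits '<path> Requires:/Provides: ...' lines):

    def encode_path(path):
        # Mirror the escapes applied in process_deps, '@' first
        for old, new in (("@", "@at@"), (" ", "@space@"), ("\t", "@tab@"),
                         ("[", "@openbrace@"), ("]", "@closebrace@"),
                         ("_", "@underscore@")):
            path = path.replace(old, new)
        return path

    print("FILERDEPENDS_%s_%s" % (encode_path("/usr/lib/my_plugin.so"), "mypkg"))
    # -> FILERDEPENDS_/usr/lib/my@underscore@plugin.so_mypkg
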
+
SHLIBSDIR = "${STAGING_DIR_HOST}/shlibs"
+SHLIBSWORKDIR = "${WORKDIR}/shlibs"
python package_do_shlibs() {
- import os, re, os.path
+ import re, pipes
- exclude_shlibs = bb.data.getVar('EXCLUDE_FROM_SHLIBS', d, 0)
+ exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', 0)
if exclude_shlibs:
bb.note("not generating shlibs")
return
@@ -553,31 +1238,28 @@ python package_do_shlibs() {
lib_re = re.compile("^.*\.so")
libdir_re = re.compile(".*/lib$")
- packages = bb.data.getVar('PACKAGES', d, 1)
- targetos = bb.data.getVar('TARGET_OS', d, 1)
+ packages = d.getVar('PACKAGES', True)
+ targetos = d.getVar('TARGET_OS', True)
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined")
- return
+ workdir = d.getVar('WORKDIR', True)
- ver = bb.data.getVar('PV', d, 1)
+ ver = d.getVar('PKGV', True)
if not ver:
- bb.error("PV not defined")
+ bb.error("PKGV not defined")
return
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
+ pkgdest = d.getVar('PKGDEST', True)
- shlibs_dir = bb.data.getVar('SHLIBSDIR', d, 1)
- bb.mkdirhier(shlibs_dir)
+ shlibs_dir = d.getVar('SHLIBSDIR', True)
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
- pstageactive = bb.data.getVar('PSTAGING_ACTIVE', d, True)
- if pstageactive == "1":
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
+ # Take shared lock since we're only reading, not writing
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
def linux_so(root, path, file):
- cmd = bb.data.getVar('OBJDUMP', d, 1) + " -p " + os.path.join(root, file) + " 2>/dev/null"
- cmd = "PATH=\"%s\" %s" % (bb.data.getVar('PATH', d, 1), cmd)
+ needs_ldconfig = False
+ cmd = d.getVar('OBJDUMP', True) + " -p " + pipes.quote(os.path.join(root, file)) + " 2>/dev/null"
+ cmd = "PATH=\"%s\" %s" % (d.getVar('PATH', True), cmd)
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
@@ -586,12 +1268,18 @@ python package_do_shlibs() {
if m:
needed[pkg].append(m.group(1))
m = re.match("\s+SONAME\s+([^\s]*)", l)
- if m and not m.group(1) in sonames:
- # if library is private (only used by package) then do not build shlib for it
- if not private_libs or -1 == private_libs.find(m.group(1)):
- sonames.append(m.group(1))
- if m and libdir_re.match(root):
- needs_ldconfig = True
+ if m:
+ this_soname = m.group(1)
+ if not this_soname in sonames:
+ # if library is private (only used by package) then do not build shlib for it
+ if not private_libs or -1 == private_libs.find(this_soname):
+ sonames.append(this_soname)
+ if libdir_re.match(root):
+ needs_ldconfig = True
+ if snap_symlinks and (file != this_soname):
+ renames.append((os.path.join(root, file), os.path.join(root, this_soname)))
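+ # e.g. a library shipped as libfoo.so.1.2.3 with SONAME libfoo.so.1
+ # is renamed to libfoo.so.1 by the loop over 'renames' below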
+ return needs_ldconfig
+
def darwin_so(root, path, file):
fullpath = os.path.join(root, file)
if not os.path.exists(fullpath):
@@ -619,7 +1307,7 @@ python package_do_shlibs() {
if not combo in sonames:
sonames.append(combo)
if file.endswith('.dylib') or file.endswith('.so'):
- lafile = fullpath.replace(os.path.join(pkgdest, pkg), bb.data.getVar('D', d, 1))
+ lafile = fullpath.replace(os.path.join(pkgdest, pkg), d.getVar('PKGD', True))
# Drop suffix
lafile = lafile.rsplit(".",1)[0]
lapath = os.path.dirname(lafile)
@@ -653,14 +1341,33 @@ python package_do_shlibs() {
if name:
needed[pkg].append(name)
#bb.note("Adding %s for %s" % (name, pkg))
+
+ if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS', True) == "1":
+ snap_symlinks = True
+ else:
+ snap_symlinks = False
+
+ if (d.getVar('USE_LDCONFIG', True) or "1") == "1":
+ use_ldconfig = True
+ else:
+ use_ldconfig = False
+
needed = {}
- private_libs = bb.data.getVar('PRIVATE_LIBS', d, 1)
+ shlib_provider = {}
for pkg in packages.split():
+ private_libs = d.getVar('PRIVATE_LIBS_' + pkg, True) or d.getVar('PRIVATE_LIBS', True)
needs_ldconfig = False
bb.debug(2, "calculating shlib provides for %s" % pkg)
+ pkgver = d.getVar('PKGV_' + pkg, True)
+ if not pkgver:
+ pkgver = d.getVar('PV_' + pkg, True)
+ if not pkgver:
+ pkgver = ver
+
needed[pkg] = []
sonames = list()
+ renames = list()
top = os.path.join(pkgdest, pkg)
for root, dirs, files in os.walk(top):
for file in files:
@@ -671,35 +1378,30 @@ python package_do_shlibs() {
if targetos == "darwin" or targetos == "darwin8":
darwin_so(root, dirs, file)
elif os.access(path, os.X_OK) or lib_re.match(file):
- linux_so(root, dirs, file)
- shlibs_file = os.path.join(shlibs_dir, pkg + ".list")
- if os.path.exists(shlibs_file):
- os.remove(shlibs_file)
- shver_file = os.path.join(shlibs_dir, pkg + ".ver")
- if os.path.exists(shver_file):
- os.remove(shver_file)
+ ldconfig = linux_so(root, dirs, file)
+ needs_ldconfig = needs_ldconfig or ldconfig
+ for (old, new) in renames:
+ bb.note("Renaming %s to %s" % (old, new))
+ os.rename(old, new)
+ shlibs_file = os.path.join(shlibswork_dir, pkg + ".list")
+ shver_file = os.path.join(shlibswork_dir, pkg + ".ver")
if len(sonames):
fd = open(shlibs_file, 'w')
for s in sonames:
fd.write(s + '\n')
+ shlib_provider[s] = (pkg, pkgver)
fd.close()
- package_stagefile(shlibs_file, d)
fd = open(shver_file, 'w')
- fd.write(ver + '\n')
+ fd.write(pkgver + '\n')
fd.close()
- package_stagefile(shver_file, d)
- if needs_ldconfig:
+ if needs_ldconfig and use_ldconfig:
bb.debug(1, 'adding ldconfig call to postinst for %s' % pkg)
- postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += bb.data.getVar('ldconfig_postinst_fragment', d, 1)
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
-
- if pstageactive == "1":
- bb.utils.unlockfile(lf)
+ postinst += d.getVar('ldconfig_postinst_fragment', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
- shlib_provider = {}
list_re = re.compile('^(.*)\.list$')
for dir in [shlibs_dir]:
if not os.path.exists(dir):
@@ -720,7 +1422,9 @@ python package_do_shlibs() {
for l in lines:
shlib_provider[l.rstrip()] = (dep_pkg, lib_ver)
- assumed_libs = bb.data.getVar('ASSUME_SHLIBS', d, 1)
+ bb.utils.unlockfile(lf)
+
+ assumed_libs = d.getVar('ASSUME_SHLIBS', True)
if assumed_libs:
for e in assumed_libs.split():
l, dep_pkg = e.split(":")
@@ -762,19 +1466,14 @@ python package_do_shlibs() {
}
python package_do_pkgconfig () {
- import re, os
-
- packages = bb.data.getVar('PACKAGES', d, 1)
-
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined")
- return
+ import re
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
+ packages = d.getVar('PACKAGES', True)
+ workdir = d.getVar('WORKDIR', True)
+ pkgdest = d.getVar('PKGDEST', True)
- shlibs_dir = bb.data.getVar('SHLIBSDIR', d, 1)
- bb.mkdirhier(shlibs_dir)
+ shlibs_dir = d.getVar('SHLIBSDIR', True)
+ shlibswork_dir = d.getVar('SHLIBSWORKDIR', True)
pc_re = re.compile('(.*)\.pc$')
var_re = re.compile('(.*)=(.*)')
@@ -804,7 +1503,7 @@ python package_do_pkgconfig () {
if m:
name = m.group(1)
val = m.group(2)
- bb.data.setVar(name, bb.data.expand(val, pd), pd)
+ pd.setVar(name, pd.expand(val))
continue
m = field_re.match(l)
if m:
@@ -813,20 +1512,16 @@ python package_do_pkgconfig () {
if hdr == 'Requires':
pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
- pstageactive = bb.data.getVar('PSTAGING_ACTIVE', d, True)
- if pstageactive == "1":
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
+ # Take shared lock since we're only reading, not writing
+ lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"))
for pkg in packages.split():
- pkgs_file = os.path.join(shlibs_dir, pkg + ".pclist")
- if os.path.exists(pkgs_file):
- os.remove(pkgs_file)
+ pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
if pkgconfig_provided[pkg] != []:
f = open(pkgs_file, 'w')
for p in pkgconfig_provided[pkg]:
f.write('%s\n' % p)
f.close()
- package_stagefile(pkgs_file, d)
for dir in [shlibs_dir]:
if not os.path.exists(dir):
@@ -854,32 +1549,29 @@ python package_do_pkgconfig () {
if found == False:
bb.note("couldn't find pkgconfig module '%s' in any package" % n)
deps_file = os.path.join(pkgdest, pkg + ".pcdeps")
- if os.path.exists(deps_file):
- os.remove(deps_file)
if len(deps):
fd = open(deps_file, 'w')
for dep in deps:
fd.write(dep + '\n')
fd.close()
- package_stagefile(deps_file, d)
- if pstageactive == "1":
- bb.utils.unlockfile(lf)
+ bb.utils.unlockfile(lf)
}
python read_shlibdeps () {
- packages = bb.data.getVar('PACKAGES', d, 1).split()
+ packages = d.getVar('PACKAGES', True).split()
for pkg in packages:
- rdepends = bb.utils.explode_deps(bb.data.getVar('RDEPENDS_' + pkg, d, 0) or bb.data.getVar('RDEPENDS', d, 0) or "")
+ rdepends = bb.utils.explode_dep_versions(d.getVar('RDEPENDS_' + pkg, False) or d.getVar('RDEPENDS', False) or "")
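+ # explode_dep_versions gives a dict mapping each dependency to its
+ # version constraint; deps read from the *deps files below carry none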
+
for extension in ".shlibdeps", ".pcdeps", ".clilibdeps":
- depsfile = bb.data.expand("${PKGDEST}/" + pkg + extension, d)
+ depsfile = d.expand("${PKGDEST}/" + pkg + extension)
if os.access(depsfile, os.R_OK):
fd = file(depsfile)
lines = fd.readlines()
fd.close()
for l in lines:
- rdepends.append(l.rstrip())
- bb.data.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends), d)
+ rdepends[l.rstrip()] = ""
+ d.setVar('RDEPENDS_' + pkg, bb.utils.join_deps(rdepends, commasep=False))
}
python package_depchains() {
@@ -896,14 +1588,14 @@ python package_depchains() {
package.
"""
- packages = bb.data.getVar('PACKAGES', d, 1)
- postfixes = (bb.data.getVar('DEPCHAIN_POST', d, 1) or '').split()
- prefixes = (bb.data.getVar('DEPCHAIN_PRE', d, 1) or '').split()
+ packages = d.getVar('PACKAGES', True)
+ postfixes = (d.getVar('DEPCHAIN_POST', True) or '').split()
+ prefixes = (d.getVar('DEPCHAIN_PRE', True) or '').split()
def pkg_adddeprrecs(pkg, base, suffix, getname, depends, d):
#bb.note('depends for %s is %s' % (base, depends))
- rreclist = bb.utils.explode_deps(bb.data.getVar('RRECOMMENDS_' + pkg, d, 1) or bb.data.getVar('RRECOMMENDS', d, 1) or "")
+ rreclist = bb.utils.explode_dep_versions(d.getVar('RRECOMMENDS_' + pkg, True) or d.getVar('RRECOMMENDS', True) or "")
for depend in depends:
if depend.find('-native') != -1 or depend.find('-cross') != -1 or depend.startswith('virtual/'):
@@ -915,16 +1607,16 @@ python package_depchains() {
depend = depend.replace('-dbg', '')
pkgname = getname(depend, suffix)
#bb.note("Adding %s for %s" % (pkgname, depend))
- if not pkgname in rreclist:
- rreclist.append(pkgname)
+ if pkgname not in rreclist:
+ rreclist[pkgname] = ""
#bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- bb.data.setVar('RRECOMMENDS_%s' % pkg, ' '.join(rreclist), d)
+ d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def pkg_addrrecs(pkg, base, suffix, getname, rdepends, d):
#bb.note('rdepends for %s is %s' % (base, rdepends))
- rreclist = bb.utils.explode_deps(bb.data.getVar('RRECOMMENDS_' + pkg, d, 1) or bb.data.getVar('RRECOMMENDS', d, 1) or "")
+ rreclist = bb.utils.explode_dep_versions(d.getVar('RRECOMMENDS_' + pkg, True) or d.getVar('RRECOMMENDS', True) or "")
for depend in rdepends:
if depend.find('virtual-locale-') != -1:
@@ -936,11 +1628,11 @@ python package_depchains() {
depend = depend.replace('-dbg', '')
pkgname = getname(depend, suffix)
#bb.note("Adding %s for %s" % (pkgname, depend))
- if not pkgname in rreclist:
- rreclist.append(pkgname)
+ if pkgname not in rreclist:
+ rreclist[pkgname] = ""
#bb.note('setting: RRECOMMENDS_%s=%s' % (pkg, ' '.join(rreclist)))
- bb.data.setVar('RRECOMMENDS_%s' % pkg, ' '.join(rreclist), d)
+ d.setVar('RRECOMMENDS_%s' % pkg, bb.utils.join_deps(rreclist, commasep=False))
def add_dep(list, dep):
dep = dep.split(' (')[0].strip()
@@ -948,15 +1640,15 @@ python package_depchains() {
list.append(dep)
depends = []
- for dep in bb.utils.explode_deps(bb.data.getVar('DEPENDS', d, 1) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('DEPENDS', True) or ""):
add_dep(depends, dep)
rdepends = []
- for dep in bb.utils.explode_deps(bb.data.getVar('RDEPENDS', d, 1) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS', True) or ""):
add_dep(rdepends, dep)
for pkg in packages.split():
- for dep in bb.utils.explode_deps(bb.data.getVar('RDEPENDS_' + pkg, d, 1) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + pkg, True) or ""):
add_dep(rdepends, dep)
#bb.note('rdepends is %s' % rdepends)
@@ -982,6 +1674,8 @@ python package_depchains() {
for suffix in pkgs:
for pkg in pkgs[suffix]:
+ if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
+ continue
(base, func) = pkgs[suffix][pkg]
if suffix == "-dev":
pkg_adddeprrecs(pkg, base, suffix, func, depends, d)
@@ -989,45 +1683,98 @@ python package_depchains() {
pkg_addrrecs(pkg, base, suffix, func, rdepends, d)
else:
rdeps = []
- for dep in bb.utils.explode_deps(bb.data.getVar('RDEPENDS_' + base, d, 1) or bb.data.getVar('RDEPENDS', d, 1) or ""):
+ for dep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + base, True) or d.getVar('RDEPENDS', True) or ""):
add_dep(rdeps, dep)
pkg_addrrecs(pkg, base, suffix, func, rdeps, d)
}
+# Since bitbake can't determine which variables are accessed during package
+# iteration, we need to list them here:
+PACKAGEVARS = "FILES RDEPENDS RRECOMMENDS SUMMARY DESCRIPTION RSUGGESTS RPROVIDES RCONFLICTS PKG ALLOW_EMPTY pkg_postinst pkg_postrm INITSCRIPT_NAME INITSCRIPT_PARAMS DEBIAN_NOAUTONAME"
+
+def gen_packagevar(d):
+ ret = []
+ pkgs = (d.getVar("PACKAGES", True) or "").split()
+ vars = (d.getVar("PACKAGEVARS", True) or "").split()
+ for p in pkgs:
+ for v in vars:
+ ret.append(v + "_" + p)
+ return " ".join(ret)
-PACKAGEFUNCS ?= "package_do_split_locales \
+PACKAGE_PREPROCESS_FUNCS ?= ""
+PACKAGEFUNCS ?= "package_get_auto_pr \
+ perform_packagecopy \
+ ${PACKAGE_PREPROCESS_FUNCS} \
+ package_do_split_locales \
+ split_and_strip_files \
+ fixup_perms \
populate_packages \
+ package_do_filedeps \
package_do_shlibs \
package_do_pkgconfig \
read_shlibdeps \
package_depchains \
emit_pkgdata"
-python package_do_package () {
- packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+python do_package () {
+ # Change the following version to force sstate to invalidate the package
+ # cache. This is useful when something this class depends on changes in a
+ # way that alters the output of this class; rpmdeps is a good example, as
+ # any change to rpmdeps requires do_package to be rerun.
+ # PACKAGE_BBCLASS_VERSION = "1"
+
+ packages = (d.getVar('PACKAGES', True) or "").split()
if len(packages) < 1:
bb.debug(1, "No packages to build, skipping do_package")
return
- for f in (bb.data.getVar('PACKAGEFUNCS', d, 1) or '').split():
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('DEPLOY_DIR', True)
+ dest = d.getVar('D', True)
+ dvar = d.getVar('PKGD', True)
+ pn = d.getVar('PN', True)
+
+ if not workdir or not outdir or not dest or not dvar or not pn or not packages:
+ bb.error("WORKDIR, DEPLOY_DIR, D, PN and PKGD all must be defined, unable to package")
+ return
+
+ for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
bb.build.exec_func(f, d)
}
-do_package[dirs] = "${D}"
+
+do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
+do_package[vardeps] += "${PACKAGEFUNCS} ${@gen_packagevar(d)}"
addtask package before do_build after do_install
+PACKAGELOCK = "${STAGING_DIR}/package-output.lock"
+SSTATETASKS += "do_package"
+do_package[sstate-name] = "package"
+do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST}"
+do_package[sstate-inputdirs] = "${PKGDESTWORK} ${SHLIBSWORKDIR}"
+do_package[sstate-outputdirs] = "${PKGDATA_DIR} ${SHLIBSDIR}"
+do_package[sstate-lockfile-shared] = "${PACKAGELOCK}"
+do_package[stamp-extra-info] = "${MACHINE}"
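+# do_package collects its output under PKGDESTWORK/SHLIBSWORKDIR (the sstate
+# input dirs); sstate then publishes it to the shared PKGDATA_DIR/SHLIBSDIR
+# output dirs under PACKAGELOCK, which is why the shlibs/pkgconfig readers
+# above only take a shared lock.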
+do_package_setscene[dirs] = "${STAGING_DIR}"
+
+python do_package_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_setscene
+
# Dummy task to mark when all packaging is complete
do_package_write () {
:
}
+do_package_write[noexec] = "1"
+PACKAGERDEPTASK = "do_package_write"
+do_build[recrdeptask] += "${PACKAGERDEPTASK}"
addtask package_write before do_build after do_package
-EXPORT_FUNCTIONS do_package do_package_write
-
#
# Helper functions for the package writing classes
#
-python package_mapping_rename_hook () {
+def mapping_rename_hook(d):
"""
Rewrite variables to account for package renaming in things
like debian.bbclass or manual PKG variable name changes
@@ -1038,6 +1785,4 @@ python package_mapping_rename_hook () {
runtime_mapping_rename("RPROVIDES", d)
runtime_mapping_rename("RREPLACES", d)
runtime_mapping_rename("RCONFLICTS", d)
-}
-EXPORT_FUNCTIONS mapping_rename_hook
diff --git a/meta/classes/package_deb.bbclass b/meta/classes/package_deb.bbclass
index d90939fdb..4096fa2b8 100644
--- a/meta/classes/package_deb.bbclass
+++ b/meta/classes/package_deb.bbclass
@@ -6,29 +6,23 @@ inherit package
IMAGE_PKGTYPE ?= "deb"
-# Map TARGET_ARCH to Debian's ideas about architectures
DPKG_ARCH ?= "${TARGET_ARCH}"
-DPKG_ARCH_x86 ?= "i386"
-DPKG_ARCH_i486 ?= "i386"
-DPKG_ARCH_i586 ?= "i386"
-DPKG_ARCH_i686 ?= "i386"
-DPKG_ARCH_pentium ?= "i386"
+
+PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
python package_deb_fn () {
- from bb import data
- bb.data.setVar('PKGFN', bb.data.getVar('PKG',d), d)
+ d.setVar('PKGFN', d.getVar('PKG'))
}
addtask package_deb_install
python do_package_deb_install () {
- import os, sys
- pkg = bb.data.getVar('PKG', d, 1)
- pkgfn = bb.data.getVar('PKGFN', d, 1)
- rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
- debdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
- apt_config = bb.data.expand('${STAGING_ETCDIR_NATIVE}/apt/apt.conf', d)
- stagingbindir = bb.data.getVar('STAGING_BINDIR_NATIVE', d, 1)
- tmpdir = bb.data.getVar('TMPDIR', d, 1)
+ pkg = d.getVar('PKG', True)
+ pkgfn = d.getVar('PKGFN', True)
+ rootfs = d.getVar('IMAGE_ROOTFS', True)
+ debdir = d.getVar('DEPLOY_DIR_DEB', True)
+ apt_config = d.expand('${STAGING_ETCDIR_NATIVE}/apt/apt.conf')
+ stagingbindir = d.getVar('STAGING_BINDIR_NATIVE', True)
+ tmpdir = d.getVar('TMPDIR', True)
if None in (pkg,pkgfn,rootfs):
raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGE_ROOTFS)")
@@ -37,6 +31,7 @@ python do_package_deb_install () {
os.makedirs(rootfs)
os.chdir(rootfs)
except OSError:
+ import sys
raise bb.build.FuncFailed(str(sys.exc_value))
# update packages file
@@ -66,32 +61,167 @@ python do_package_deb_install () {
os.putenv('PATH', path)
}
+#
+# Update the Packages index files in ${DEPLOY_DIR_DEB}
+#
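+# An illustrative result for a single arch "armv5te":
+#   ${DEPLOY_DIR_DEB}/armv5te/Packages.gz  - dpkg-scanpackages output
+#   ${DEPLOY_DIR_DEB}/armv5te/Release      - contains "Label: armv5te"
+#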
+package_update_index_deb () {
+
+ local debarchs=""
+
+ if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+ return
+ fi
+
+ for arch in ${PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}; do
+ if [ -e ${DEPLOY_DIR_DEB}/$arch ]; then
+ debarchs="$debarchs $arch"
+ fi
+ done
+
+ for arch in $debarchs; do
+ if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+ continue;
+ fi
+ cd ${DEPLOY_DIR_DEB}/$arch
+ dpkg-scanpackages . | gzip > Packages.gz
+ echo "Label: $arch" > Release
+ done
+}
+
+#
+# install a bunch of packages using apt
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_DEB - install root dir
+# INSTALL_BASEARCH_DEB - install base architecture
+# INSTALL_ARCHS_DEB - list of available archs
+# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages whose installation is only attempted (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_DEB - additional locale (linguas) packages; only installed on glibc targets
+# INSTALL_TASK_DEB - task name
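+#
+# A minimal (hypothetical) caller might set, e.g.:
+#   INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
+#   INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
+#   INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
+#   INSTALL_PACKAGES_NORMAL_DEB="package-a package-b"
+#   INSTALL_TASK_DEB="rootfs"
+# and then call package_install_internal_deb.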
+
+package_install_internal_deb () {
+
+ local target_rootfs="${INSTALL_ROOTFS_DEB}"
+ local dpkg_arch="${INSTALL_BASEARCH_DEB}"
+ local archs="${INSTALL_ARCHS_DEB}"
+ local package_to_install="${INSTALL_PACKAGES_NORMAL_DEB}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_DEB}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_DEB}"
+ local task="${INSTALL_TASK_DEB}"
+
+ rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
+ rm -f ${STAGING_ETCDIR_NATIVE}/apt/preferences
+
+ priority=1
+ for arch in $archs; do
+ if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
+ continue;
+ fi
+
+ echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
+ (echo "Package: *"
+ echo "Pin: release l=$arch"
+ echo "Pin-Priority: $(expr 800 + $priority)"
+ echo) >> ${STAGING_ETCDIR_NATIVE}/apt/preferences
+ priority=$(expr $priority + 5)
+ done
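+ # e.g. for an arch "all" on the first iteration this appends (illustrative):
+ #   Package: *
+ #   Pin: release l=all
+ #   Pin-Priority: 801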
+
+ tac ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev > ${STAGING_ETCDIR_NATIVE}/apt/sources.list
+
+ cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
+ | sed -e "s#Architecture \".*\";#Architecture \"${dpkg_arch}\";#" \
+ | sed -e "s:#ROOTFS#:${target_rootfs}:g" \
+ > "${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
+
+ export APT_CONFIG="${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
+
+ mkdir -p ${target_rootfs}/var/lib/dpkg/info
+ mkdir -p ${target_rootfs}/var/lib/dpkg/updates
+
+ > ${target_rootfs}/var/lib/dpkg/status
+ > ${target_rootfs}/var/lib/dpkg/available
+
+ apt-get update
+
+ # Uclibc builds don't provide this stuff...
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_linguas}" ]; then
+ apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ for i in ${package_linguas}; do
+ apt-get install $i --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ done
+ fi
+ fi
+
+ # normal install
+ for i in ${package_to_install}; do
+ apt-get install $i --force-yes --allow-unauthenticated
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ done
+
+ rm -f ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID}
+ if [ ! -z "${package_attemptonly}" ]; then
+ for i in ${package_attemptonly}; do
+ apt-get install $i --force-yes --allow-unauthenticated >> ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID} 2>&1 || true
+ done
+ fi
+
+ find ${target_rootfs} -name \*.dpkg-new | for i in `cat`; do
+ mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
+ done
+
+ # Mark all packages installed
+ sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${target_rootfs}/var/lib/dpkg/status
+}
+
+deb_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "E:"
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
python do_package_deb () {
- import sys, re, copy
+ import re, copy
+ import textwrap
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- import os # path manipulations
- outdir = bb.data.getVar('DEPLOY_DIR_DEB', d, 1)
+ outdir = d.getVar('PKGWRITEDIRDEB', True)
if not outdir:
- bb.error("DEPLOY_DIR_DEB not defined, unable to package")
- return
-
- dvar = bb.data.getVar('D', d, 1)
- if not dvar:
- bb.error("D not defined, unable to package")
+ bb.error("PKGWRITEDIRDEB not defined, unable to package")
return
- bb.mkdirhier(dvar)
- packages = bb.data.getVar('PACKAGES', d, 1)
+ packages = d.getVar('PACKAGES', True)
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
- tmpdir = bb.data.getVar('TMPDIR', d, 1)
+ tmpdir = d.getVar('TMPDIR', True)
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
@@ -100,30 +230,27 @@ python do_package_deb () {
bb.debug(1, "No packages; nothing to do")
return
+ pkgdest = d.getVar('PKGDEST', True)
+
for pkg in packages.split():
localdata = bb.data.createCopy(d)
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
- bb.data.setVar('ROOT', '', localdata)
- bb.data.setVar('ROOT_%s' % pkg, root, localdata)
- pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
- bb.data.setVar('PKG', pkgname, localdata)
+ localdata.setVar('PKG', pkgname)
- overrides = bb.data.getVar('OVERRIDES', localdata)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
- overrides = bb.data.expand(overrides, localdata)
- bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+ localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- pkgoutdir = os.path.join(outdir, bb.data.getVar('PACKAGE_ARCH', localdata, 1))
+ pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
bb.mkdirhier(pkgoutdir)
os.chdir(root)
@@ -134,9 +261,8 @@ python do_package_deb () {
del g[g.index('./DEBIAN')]
except ValueError:
pass
- if not g and bb.data.getVar('ALLOW_EMPTY', localdata) != "1":
- from bb import note
- note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+ if not g and localdata.getVar('ALLOW_EMPTY') != "1":
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
bb.utils.unlockfile(lf)
continue
@@ -152,11 +278,11 @@ python do_package_deb () {
raise bb.build.FuncFailed("unable to open control file for writing.")
fields = []
- pe = bb.data.getVar('PE', d, 1)
+ pe = d.getVar('PKGE', True)
if pe and int(pe) > 0:
- fields.append(["Version: %s:%s-%s\n", ['PE', 'PV', 'PR']])
+ fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
- fields.append(["Version: %s-%s\n", ['PV', 'PR']])
+ fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
fields.append(["Description: %s\n", ['DESCRIPTION']])
fields.append(["Section: %s\n", ['SECTION']])
fields.append(["Priority: %s\n", ['PRIORITY']])
@@ -165,17 +291,17 @@ python do_package_deb () {
fields.append(["OE: %s\n", ['PN']])
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
-# Package, Version, Maintainer, Description - mandatory
-# Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
+ # Package, Version, Maintainer, Description - mandatory
+ # Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
def pullData(l, d):
l2 = []
for i in l:
- data = bb.data.getVar(i, d, 1)
+ data = d.getVar(i, True)
if data is None:
raise KeyError(f)
- if i == 'DPKG_ARCH' and bb.data.getVar('PACKAGE_ARCH', d, 1) == 'all':
+ if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
data = 'all'
l2.append(data)
return l2
@@ -184,40 +310,56 @@ python do_package_deb () {
# check for required fields
try:
for (c, fs) in fields:
- ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
+ for f in fs:
+ if localdata.getVar(f) is None:
+ raise KeyError(f)
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
+ description = localdata.getVar('DESCRIPTION', True) or "."
+ description = textwrap.dedent(description).strip()
+ ctrlfile.write('Description: %s\n' % unicode(summary))
+ ctrlfile.write('%s\n' % unicode(textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' ')))
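+ # e.g. the control file gains (illustrative):
+ #   Description: a short summary
+ #    the longer description, dedented and re-wrapped to 74
+ #    columns with the one-space indent the deb format requires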
+ else:
+ ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
except KeyError:
+ import sys
(type, value, traceback) = sys.exc_info()
bb.utils.unlockfile(lf)
ctrlfile.close()
raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
# more fields
- bb.build.exec_func("mapping_rename_hook", localdata)
+ mapping_rename_hook(localdata)
- rdepends = bb.utils.explode_deps(unicode(bb.data.getVar("RDEPENDS", localdata, 1) or ""))
- rdepends = [dep for dep in rdepends if not '*' in dep]
- rrecommends = bb.utils.explode_deps(unicode(bb.data.getVar("RRECOMMENDS", localdata, 1) or ""))
- rrecommends = [rec for rec in rrecommends if not '*' in rec]
- rsuggests = (unicode(bb.data.getVar("RSUGGESTS", localdata, 1) or "")).split()
- rprovides = (unicode(bb.data.getVar("RPROVIDES", localdata, 1) or "")).split()
- rreplaces = (unicode(bb.data.getVar("RREPLACES", localdata, 1) or "")).split()
- rconflicts = (unicode(bb.data.getVar("RCONFLICTS", localdata, 1) or "")).split()
+ rdepends = bb.utils.explode_dep_versions(localdata.getVar("RDEPENDS", True) or "")
+ for dep in rdepends:
+ if '*' in dep:
+ del rdepends[dep]
+ rrecommends = bb.utils.explode_dep_versions(localdata.getVar("RRECOMMENDS", True) or "")
+ for dep in rrecommends:
+ if '*' in dep:
+ del rrecommends[dep]
+ rsuggests = bb.utils.explode_dep_versions(localdata.getVar("RSUGGESTS", True) or "")
+ rprovides = bb.utils.explode_dep_versions(localdata.getVar("RPROVIDES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions(localdata.getVar("RREPLACES", True) or "")
+ rconflicts = bb.utils.explode_dep_versions(localdata.getVar("RCONFLICTS", True) or "")
if rdepends:
- ctrlfile.write(u"Depends: %s\n" % ", ".join(rdepends))
+ ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
if rsuggests:
- ctrlfile.write(u"Suggests: %s\n" % ", ".join(rsuggests))
+ ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
if rrecommends:
- ctrlfile.write(u"Recommends: %s\n" % ", ".join(rrecommends))
+ ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
if rprovides:
- ctrlfile.write(u"Provides: %s\n" % ", ".join(rprovides))
+ ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
if rreplaces:
- ctrlfile.write(u"Replaces: %s\n" % ", ".join(rreplaces))
+ ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
if rconflicts:
- ctrlfile.write(u"Conflicts: %s\n" % ", ".join(rconflicts))
+ ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = bb.data.getVar('pkg_%s' % script, localdata, 1)
+ scriptvar = localdata.getVar('pkg_%s' % script, True)
if not scriptvar:
continue
try:
@@ -230,7 +372,7 @@ python do_package_deb () {
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = bb.data.getVar("CONFFILES", localdata, 1)
+ conffiles_str = localdata.getVar("CONFFILES", True)
if conffiles_str:
try:
conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
@@ -242,8 +384,9 @@ python do_package_deb () {
conffiles.close()
os.chdir(basedir)
- ret = os.system("PATH=\"%s\" fakeroot dpkg-deb -b %s %s" % (bb.data.getVar("PATH", localdata, 1), root, pkgoutdir))
+ ret = os.system("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir))
if ret != 0:
+ bb.utils.prunedir(controldir)
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("dpkg-deb execution failed")
@@ -251,19 +394,37 @@ python do_package_deb () {
bb.utils.unlockfile(lf)
}
+SSTATETASKS += "do_package_write_deb"
+do_package_write_deb[sstate-name] = "deploy-deb"
+do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
+do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
+
+python do_package_write_deb_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_write_deb_setscene
+
python () {
- import bb
- if bb.data.getVar('PACKAGES', d, True) != '':
- deps = (bb.data.getVarFlag('do_package_write_deb', 'depends', d) or "").split()
- deps.append('dpkg-native:do_populate_staging')
- deps.append('fakeroot-native:do_populate_staging')
- bb.data.setVarFlag('do_package_write_deb', 'depends', " ".join(deps), d)
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_deb', 'depends', deps)
+ d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
+ d.setVarFlag('do_package_write_deb_setscene', 'fakeroot', "1")
+
+ # Map TARGET_ARCH to Debian's ideas about architectures
+ if d.getVar('DPKG_ARCH', True) in ["x86", "i486", "i586", "i686", "pentium"]:
+ d.setVar('DPKG_ARCH', 'i386')
}
python do_package_write_deb () {
bb.build.exec_func("read_subpackage_metadata", d)
bb.build.exec_func("do_package_deb", d)
}
-do_package_write_deb[dirs] = "${D}"
+do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
+do_package_write_deb[umask] = "022"
addtask package_write_deb before do_package_write after do_package
+
+PACKAGEINDEXES += "package_update_index_deb;"
+PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"
diff --git a/meta/classes/package_ipk.bbclass b/meta/classes/package_ipk.bbclass
index 1aa2c814b..73ec0ee14 100644
--- a/meta/classes/package_ipk.bbclass
+++ b/meta/classes/package_ipk.bbclass
@@ -2,22 +2,25 @@ inherit package
IMAGE_PKGTYPE ?= "ipk"
-IPKGCONF_TARGET = "${STAGING_ETCDIR_NATIVE}/opkg.conf"
-IPKGCONF_SDK = "${STAGING_ETCDIR_NATIVE}/opkg-sdk.conf"
+IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
+IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
+
+PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
+
+# Program to be used to build opkg packages
+OPKGBUILDCMD ??= "opkg-build"
python package_ipk_fn () {
- from bb import data
- bb.data.setVar('PKGFN', bb.data.getVar('PKG',d), d)
+ d.setVar('PKGFN', d.getVar('PKG'))
}
python package_ipk_install () {
- import os, sys
- pkg = bb.data.getVar('PKG', d, 1)
- pkgfn = bb.data.getVar('PKGFN', d, 1)
- rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
- ipkdir = bb.data.getVar('DEPLOY_DIR_IPK', d, 1)
- stagingdir = bb.data.getVar('STAGING_DIR', d, 1)
- tmpdir = bb.data.getVar('TMPDIR', d, 1)
+ pkg = d.getVar('PKG', True)
+ pkgfn = d.getVar('PKGFN', True)
+ rootfs = d.getVar('IMAGE_ROOTFS', True)
+ ipkdir = d.getVar('DEPLOY_DIR_IPK', True)
+ stagingdir = d.getVar('STAGING_DIR', True)
+ tmpdir = d.getVar('TMPDIR', True)
if None in (pkg,pkgfn,rootfs):
raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
@@ -25,6 +28,7 @@ python package_ipk_install () {
bb.mkdirhier(rootfs)
os.chdir(rootfs)
except OSError:
+ import sys
(type, value, traceback) = sys.exc_info()
print value
raise bb.build.FuncFailed
@@ -32,7 +36,7 @@ python package_ipk_install () {
# Generate ipk.conf if it or the stamp doesnt exist
conffile = os.path.join(stagingdir,"ipkg.conf")
if not os.access(conffile, os.R_OK):
- ipkg_archs = bb.data.getVar('PACKAGE_ARCHS',d)
+ ipkg_archs = d.getVar('PACKAGE_ARCHS')
if ipkg_archs is None:
bb.error("PACKAGE_ARCHS missing")
raise FuncFailed
@@ -47,8 +51,7 @@ python package_ipk_install () {
f.close()
- if (not os.access(os.path.join(ipkdir,"Packages"), os.R_OK) or
- not os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"),os.R_OK):
+ if not os.access(os.path.join(ipkdir,"Packages"), os.R_OK) or not os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"),os.R_OK):
ret = os.system('opkg-make-index -p %s %s ' % (os.path.join(ipkdir, "Packages"), ipkdir))
if (ret != 0 ):
raise bb.build.FuncFailed
@@ -61,29 +64,157 @@ python package_ipk_install () {
raise bb.build.FuncFailed
}
+package_tryout_install_multilib_ipk() {
+ #try install multilib
+ multilib_tryout_dirs=""
+ for item in ${MULTILIB_VARIANTS}; do
+ local target_rootfs="${MULTILIB_TEMP_ROOTFS}/${item}"
+ local ipkg_args="-f ${INSTALL_CONF_IPK} -o ${target_rootfs} --force_overwrite"
+ local selected_pkg=""
+ local pkgname_prefix="${item}-"
+ local pkgname_len=${#pkgname_prefix}
+ for pkg in ${INSTALL_PACKAGES_MULTILIB_IPK}; do
+ local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var); print pkgname; }' )
+ if [ ${pkgname} = ${pkgname_prefix} ]; then
+ selected_pkg="${selected_pkg} ${pkg}"
+ fi
+ done
+ if [ ! -z "${selected_pkg}" ]; then
+ rm -f ${target_rootfs}
+ mkdir -p ${target_rootfs}/${opkglibdir}
+ opkg-cl ${ipkg_args} update
+ opkg-cl ${ipkg_args} install ${selected_pkg}
+ multilib_tryout_dirs="${multilib_tryout_dirs} ${target_rootfs}"
+ fi
+ done
+}
+
+split_multilib_packages() {
+ INSTALL_PACKAGES_NORMAL_IPK=""
+ INSTALL_PACKAGES_MULTILIB_IPK=""
+ for pkg in ${INSTALL_PACKAGES_IPK}; do
+ is_multilib=0
+ for item in ${MULTILIB_VARIANTS}; do
+ local pkgname_prefix="${item}-"
+ local pkgname_len=${#pkgname_prefix}
+ local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var); print pkgname; }' )
+ if [ ${pkgname} = ${pkgname_prefix} ]; then
+ is_multilib=1
+ break
+ fi
+ done
+
+ if [ ${is_multilib} = 0 ]; then
+ INSTALL_PACKAGES_NORMAL_IPK="${INSTALL_PACKAGES_NORMAL_IPK} ${pkg}"
+ else
+ INSTALL_PACKAGES_MULTILIB_IPK="${INSTALL_PACKAGES_MULTILIB_IPK} ${pkg}"
+ fi
+ done
+}
+
+#
+# install a bunch of packages using opkg
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_IPK - install root dir
+# INSTALL_CONF_IPK - configuration file
+# INSTALL_PACKAGES_IPK - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_IPK - packages whose installation is only attempted (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_IPK - additional locale (linguas) packages; only installed on glibc targets
+# INSTALL_TASK_IPK - task name
+
+package_install_internal_ipk() {
+
+ local target_rootfs="${INSTALL_ROOTFS_IPK}"
+ local conffile="${INSTALL_CONF_IPK}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_IPK}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_IPK}"
+ local task="${INSTALL_TASK_IPK}"
+
+ split_multilib_packages
+
+ local package_to_install="${INSTALL_PACKAGES_NORMAL_IPK}"
+ local package_multilib="${INSTALL_PACKAGES_MULTILIB_IPK}"
+
+ mkdir -p ${target_rootfs}${localstatedir}/lib/opkg/
+
+ local ipkg_args="-f ${conffile} -o ${target_rootfs} --force-overwrite --force_postinstall"
+
+ opkg-cl ${ipkg_args} update
+
+ # Uclibc builds don't provide this stuff...
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_linguas}" ]; then
+ for i in ${package_linguas}; do
+ opkg-cl ${ipkg_args} install $i
+ done
+ fi
+ fi
+
+ if [ ! -z "${package_to_install}" ]; then
+ opkg-cl ${ipkg_args} install ${package_to_install}
+ fi
+
+ if [ ! -z "${package_attemptonly}" ]; then
+ opkg-cl ${ipkg_args} install ${package_attemptonly} > "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
+ fi
+
+ package_tryout_install_multilib_ipk
+ if [ ! -z "${MULTILIB_CHECK_FILE}" ]; then
+ #sanity check
+ multilib_sanity_check ${target_rootfs} ${multilib_tryout_dirs} || exit 1
+ fi
+
+ if [ ! -z "${package_multilib}" ]; then
+ opkg-cl ${ipkg_args} install ${package_multilib}
+ fi
+}
+
+ipk_log_check() {
+ target="$1"
+ lf_path="$2"
+
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "exit 1" "Collected errors" ERR Fail
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
+
#
# Update the Packages index files in ${DEPLOY_DIR_IPK}
#
package_update_index_ipk () {
set -x
- ipkgarchs="${PACKAGE_ARCHS}"
+ ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}"
if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
return
fi
- touch ${DEPLOY_DIR_IPK}/Packages
- opkg-make-index -r ${DEPLOY_DIR_IPK}/Packages -p ${DEPLOY_DIR_IPK}/Packages -l ${DEPLOY_DIR_IPK}/Packages.filelist -m ${DEPLOY_DIR_IPK}
-
+ packagedirs="${DEPLOY_DIR_IPK}"
for arch in $ipkgarchs; do
- if [ -e ${DEPLOY_DIR_IPK}/$arch/ ] ; then
- touch ${DEPLOY_DIR_IPK}/$arch/Packages
- opkg-make-index -r ${DEPLOY_DIR_IPK}/$arch/Packages -p ${DEPLOY_DIR_IPK}/$arch/Packages -l ${DEPLOY_DIR_IPK}/$arch/Packages.filelist -m ${DEPLOY_DIR_IPK}/$arch/
- fi
- if [ -e ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/ ] ; then
- touch ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/Packages
- opkg-make-index -r ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/Packages -p ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/Packages -l ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/Packages.filelist -m ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/
+ packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
+ done
+
+ multilib_archs="${MULTILIB_ARCHS}"
+ for arch in $multilib_archs; do
+ packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
+ done
+
+ for pkgdir in $packagedirs; do
+ if [ -e $pkgdir/ ]; then
+ touch $pkgdir/Packages
+ flock $pkgdir/Packages.flock -c "opkg-make-index -r $pkgdir/Packages -p $pkgdir/Packages -m $pkgdir/"
fi
done
}
@@ -95,85 +226,79 @@ package_update_index_ipk () {
#
package_generate_ipkg_conf () {
package_generate_archlist
- echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_TARGET}
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_SDK}
- ipkgarchs="${PACKAGE_ARCHS}"
+ ipkgarchs="${SDK_PACKAGE_ARCHS}"
for arch in $ipkgarchs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
- echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
+ echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_SDK}
fi
- if [ -e ${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk/Packages ] ; then
- echo "src oe-${BUILD_ARCH}-$arch-sdk file:${DEPLOY_DIR_IPK}/${BUILD_ARCH}-$arch-sdk" >> ${IPKGCONF_SDK}
+ done
+
+ echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_TARGET}
+ ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
+ for arch in $ipkgarchs; do
+ if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
+ echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
fi
done
}
package_generate_archlist () {
- ipkgarchs="${PACKAGE_ARCHS}"
+ ipkgarchs="${SDK_PACKAGE_ARCHS}"
+ priority=1
+ for arch in $ipkgarchs; do
+ echo "arch $arch $priority" >> ${IPKGCONF_SDK}
+ priority=$(expr $priority + 5)
+ done
+
+ ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
priority=1
for arch in $ipkgarchs; do
echo "arch $arch $priority" >> ${IPKGCONF_TARGET}
- echo "arch ${BUILD_ARCH}-$arch-sdk $priority" >> ${IPKGCONF_SDK}
priority=$(expr $priority + 5)
done
}
python do_package_ipk () {
- import sys, re, copy
+ import re, copy
+ import textwrap
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('PKGWRITEDIRIPK', True)
+ tmpdir = d.getVar('TMPDIR', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ if not workdir or not outdir or not tmpdir:
+ bb.error("Variables incorrectly set, unable to package")
return
- import os # path manipulations
- outdir = bb.data.getVar('DEPLOY_DIR_IPK', d, 1)
- if not outdir:
- bb.error("DEPLOY_DIR_IPK not defined, unable to package")
- return
-
- dvar = bb.data.getVar('D', d, 1)
- if not dvar:
- bb.error("D not defined, unable to package")
- return
- bb.mkdirhier(dvar)
-
- packages = bb.data.getVar('PACKAGES', d, 1)
- if not packages:
- bb.debug(1, "PACKAGES not defined, nothing to package")
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
return
- tmpdir = bb.data.getVar('TMPDIR', d, 1)
-
+ # We're about to add new packages so the index needs to be checked
+ # so remove the appropriate stamp file.
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
- if packages == []:
- bb.debug(1, "No packages; nothing to do")
- return
-
for pkg in packages.split():
localdata = bb.data.createCopy(d)
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
- bb.data.setVar('ROOT', '', localdata)
- bb.data.setVar('ROOT_%s' % pkg, root, localdata)
- pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
- bb.data.setVar('PKG', pkgname, localdata)
+ localdata.setVar('PKG', pkgname)
- overrides = bb.data.getVar('OVERRIDES', localdata, True)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
- bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+ localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
- arch = bb.data.getVar('PACKAGE_ARCH', localdata, 1)
+ arch = localdata.getVar('PACKAGE_ARCH', True)
pkgoutdir = "%s/%s" % (outdir, arch)
bb.mkdirhier(pkgoutdir)
os.chdir(root)
@@ -184,9 +309,8 @@ python do_package_ipk () {
del g[g.index('./CONTROL')]
except ValueError:
pass
- if not g and bb.data.getVar('ALLOW_EMPTY', localdata) != "1":
- from bb import note
- note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+ if not g and localdata.getVar('ALLOW_EMPTY') != "1":
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
bb.utils.unlockfile(lf)
continue
@@ -199,15 +323,16 @@ python do_package_ipk () {
raise bb.build.FuncFailed("unable to open control file for writing.")
fields = []
- pe = bb.data.getVar('PE', d, 1)
+ pe = d.getVar('PKGE', True)
if pe and int(pe) > 0:
- fields.append(["Version: %s:%s-%s\n", ['PE', 'PV', 'PR']])
+ fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
- fields.append(["Version: %s-%s\n", ['PV', 'PR']])
+ fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
fields.append(["Description: %s\n", ['DESCRIPTION']])
fields.append(["Section: %s\n", ['SECTION']])
fields.append(["Priority: %s\n", ['PRIORITY']])
fields.append(["Maintainer: %s\n", ['MAINTAINER']])
+ fields.append(["License: %s\n", ['LICENSE']])
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
fields.append(["OE: %s\n", ['PN']])
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
@@ -215,7 +340,7 @@ python do_package_ipk () {
def pullData(l, d):
l2 = []
for i in l:
- l2.append(bb.data.getVar(i, d, 1))
+ l2.append(d.getVar(i, True))
return l2
ctrlfile.write("Package: %s\n" % pkgname)
@@ -223,44 +348,54 @@ python do_package_ipk () {
try:
for (c, fs) in fields:
for f in fs:
- if bb.data.getVar(f, localdata) is None:
+ if localdata.getVar(f) is None:
raise KeyError(f)
- ctrlfile.write(c % tuple(pullData(fs, localdata)))
+ # Special behavior for description...
+ if 'DESCRIPTION' in fs:
+ summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
+ description = localdata.getVar('DESCRIPTION', True) or "."
+ description = textwrap.dedent(description).strip()
+ ctrlfile.write('Description: %s\n' % summary)
+ ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
+ else:
+ ctrlfile.write(c % tuple(pullData(fs, localdata)))
except KeyError:
+ import sys
(type, value, traceback) = sys.exc_info()
ctrlfile.close()
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
# more fields
- bb.build.exec_func("mapping_rename_hook", localdata)
+ mapping_rename_hook(localdata)
+
+ rdepends = bb.utils.explode_dep_versions(localdata.getVar("RDEPENDS", True) or "")
+ rrecommends = bb.utils.explode_dep_versions(localdata.getVar("RRECOMMENDS", True) or "")
+ rsuggests = bb.utils.explode_dep_versions(localdata.getVar("RSUGGESTS", True) or "")
+ rprovides = bb.utils.explode_dep_versions(localdata.getVar("RPROVIDES", True) or "")
+ rreplaces = bb.utils.explode_dep_versions(localdata.getVar("RREPLACES", True) or "")
+ rconflicts = bb.utils.explode_dep_versions(localdata.getVar("RCONFLICTS", True) or "")
- rdepends = bb.utils.explode_deps(bb.data.getVar("RDEPENDS", localdata, 1) or "")
- rrecommends = bb.utils.explode_deps(bb.data.getVar("RRECOMMENDS", localdata, 1) or "")
- rsuggests = (bb.data.getVar("RSUGGESTS", localdata, 1) or "").split()
- rprovides = (bb.data.getVar("RPROVIDES", localdata, 1) or "").split()
- rreplaces = (bb.data.getVar("RREPLACES", localdata, 1) or "").split()
- rconflicts = (bb.data.getVar("RCONFLICTS", localdata, 1) or "").split()
if rdepends:
- ctrlfile.write("Depends: %s\n" % ", ".join(rdepends))
+ ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
if rsuggests:
- ctrlfile.write("Suggests: %s\n" % ", ".join(rsuggests))
+ ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
if rrecommends:
- ctrlfile.write("Recommends: %s\n" % ", ".join(rrecommends))
+ ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
if rprovides:
- ctrlfile.write("Provides: %s\n" % ", ".join(rprovides))
+ ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
if rreplaces:
- ctrlfile.write("Replaces: %s\n" % ", ".join(rreplaces))
+ ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
if rconflicts:
- ctrlfile.write("Conflicts: %s\n" % ", ".join(rconflicts))
- src_uri = bb.data.getVar("SRC_URI", localdata, 1)
+ ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
+ src_uri = localdata.getVar("SRC_URI", True) or "None"
if src_uri:
src_uri = re.sub("\s+", " ", src_uri)
ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
- scriptvar = bb.data.getVar('pkg_%s' % script, localdata, 1)
+ scriptvar = localdata.getVar('pkg_%s' % script, True)
if not scriptvar:
continue
try:
@@ -272,7 +407,7 @@ python do_package_ipk () {
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
- conffiles_str = bb.data.getVar("CONFFILES", localdata, 1)
+ conffiles_str = localdata.getVar("CONFFILES", True)
if conffiles_str:
try:
conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
@@ -284,28 +419,43 @@ python do_package_ipk () {
conffiles.close()
os.chdir(basedir)
- ret = os.system("PATH=\"%s\" %s %s %s" % (bb.data.getVar("PATH", localdata, 1),
- bb.data.getVar("OPKGBUILDCMD",d,1), pkg, pkgoutdir))
+ ret = os.system("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", True),
+ d.getVar("OPKGBUILDCMD",1), pkg, pkgoutdir))
if ret != 0:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("opkg-build execution failed")
bb.utils.prunedir(controldir)
bb.utils.unlockfile(lf)
+
}
+SSTATETASKS += "do_package_write_ipk"
+do_package_write_ipk[sstate-name] = "deploy-ipk"
+do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
+do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
+
+python do_package_write_ipk_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_write_ipk_setscene
+
python () {
- import bb
- if bb.data.getVar('PACKAGES', d, True) != '':
- deps = (bb.data.getVarFlag('do_package_write_ipk', 'depends', d) or "").split()
- deps.append('opkg-utils-native:do_populate_staging')
- deps.append('fakeroot-native:do_populate_staging')
- bb.data.setVarFlag('do_package_write_ipk', 'depends', " ".join(deps), d)
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' opkg-utils-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_ipk', 'depends', deps)
+ d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
+ d.setVarFlag('do_package_write_ipk_setscene', 'fakeroot', "1")
}
python do_package_write_ipk () {
bb.build.exec_func("read_subpackage_metadata", d)
bb.build.exec_func("do_package_ipk", d)
}
-do_package_write_ipk[dirs] = "${D}"
+do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
+do_package_write_ipk[umask] = "022"
addtask package_write_ipk before do_package_write after do_package
+
+PACKAGEINDEXES += "package_update_index_ipk;"
+PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"
diff --git a/meta/classes/package_rpm.bbclass b/meta/classes/package_rpm.bbclass
index d291733dc..2da7a8b85 100644
--- a/meta/classes/package_rpm.bbclass
+++ b/meta/classes/package_rpm.bbclass
@@ -1,233 +1,1136 @@
inherit package
-#IMAGE_PKGTYPE ?= "rpm"
-
-RPMBUILD="rpmbuild --short-circuit ${RPMOPTS}"
IMAGE_PKGTYPE ?= "rpm"
-RPMBUILDPATH="${WORKDIR}/rpm"
+RPM="rpm"
+RPMBUILD="rpmbuild"
-RPMOPTS="--rcfile=${WORKDIR}/rpmrc"
-RPMOPTS="--rcfile=${WORKDIR}/rpmrc --target ${TARGET_SYS}"
-RPM="rpm ${RPMOPTS}"
+PKGWRITEDIRRPM = "${WORKDIR}/deploy-rpms"
+PKGWRITEDIRSRPM = "${DEPLOY_DIR}/sources/deploy-srpm"
-python write_specfile() {
- from bb import data, build
- import sys
+python package_rpm_fn () {
+ d.setVar('PKGFN', d.getVar('PKG'))
+}
- version = bb.data.getVar('PV', d, 1)
- version = version.replace('-', '+')
- bb.data.setVar('RPMPV', version, d)
+python package_rpm_install () {
+ bb.fatal("package_rpm_install not implemented!")
+}
- out_vartranslate = {
- "PKG": "Name",
- "RPMPV": "Version",
- "PR": "Release",
- "DESCRIPTION": "%description",
- "ROOT": "BuildRoot",
- "LICENSE": "License",
- "SECTION": "Group",
- "pkg_postinst": "%post",
- "pkg_preinst": "%pre",
- }
+RPMCONF_TARGET_BASE = "${DEPLOY_DIR_RPM}/solvedb"
+RPMCONF_HOST_BASE = "${DEPLOY_DIR_RPM}/solvedb-sdk"
+#
+# Update the Packages depsolver db in ${DEPLOY_DIR_RPM}
+#
+package_update_index_rpm () {
+ if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
+ return
+ fi
- root = bb.data.getVar('ROOT', d)
+ # Update target packages
+ base_archs="${PACKAGE_ARCHS}"
+ ml_archs="${MULTILIB_PACKAGE_ARCHS}"
+ package_update_index_rpm_common "${RPMCONF_TARGET_BASE}" base_archs ml_archs
- # get %files
- filesvar = bb.data.expand(bb.data.getVar('FILES', d), d) or ""
- from glob import glob
- files = filesvar.split()
- todelete = []
- for file in files:
- if file[0] == '.':
- newfile = file[1:]
- files[files.index(file)] = newfile
- file = newfile
- else:
- newfile = file
- realfile = os.path.join(root, './'+file)
- if not glob(realfile):
- todelete.append(files[files.index(newfile)])
- for r in todelete:
- try:
- del files[files.index(r)]
- except ValueError:
- pass
+ # Update SDK packages
+ base_archs="${SDK_PACKAGE_ARCHS}"
+ package_update_index_rpm_common "${RPMCONF_HOST_BASE}" base_archs
+}
- if not files and bb.data.getVar('ALLOW_EMPTY', d) != "1":
- from bb import note
- note("Not creating empty archive for %s-%s-%s" % (bb.data.getVar('PKG',d, 1), bb.data.getVar('PV', d, 1), bb.data.getVar('PR', d, 1)))
- return
+package_update_index_rpm_common () {
+ rpmconf_base="$1"
+ shift
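+ # Each remaining argument is the *name* of a shell variable holding an
+ # arch list (hence the eval below), e.g. base_archs="all armv5te noarch"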
- # output .spec using this metadata store
- try:
- from __builtin__ import file
- if not bb.data.getVar('OUTSPECFILE', d):
- raise OSError('eek!')
- specfile = file(bb.data.getVar('OUTSPECFILE', d), 'w')
- except OSError:
- raise bb.build.FuncFailed("unable to open spec file for writing.")
+ createdirs=""
+ for archvar in "$@"; do
+ eval archs=\${${archvar}}
+ packagedirs=""
+ for arch in $archs; do
+ packagedirs="${DEPLOY_DIR_RPM}/$arch $packagedirs"
+ rm -rf ${DEPLOY_DIR_RPM}/$arch/solvedb.done
+ done
- fd = specfile
- for var in out_vartranslate.keys():
- if out_vartranslate[var][0] == "%":
- continue
- val = bb.data.getVar(var, d, 1)
- if val:
- fd.write("%s\t: %s\n" % (out_vartranslate[var], val))
+ cat /dev/null > ${rpmconf_base}-${archvar}.conf
+ for pkgdir in $packagedirs; do
+ if [ -e $pkgdir/ ]; then
+ echo "Generating solve db for $pkgdir..."
+ echo $pkgdir/solvedb >> ${rpmconf_base}-${archvar}.conf
+ createdirs="$createdirs $pkgdir"
+ fi
+ done
+ done
+ rpm-createsolvedb.py "${RPM}" $createdirs
+}
- fd.write("AutoReqProv: no\n")
+#
+# Generate an rpm configuration suitable for use against the
+# generated depsolver db's...
+#
+package_generate_rpm_conf () {
+ # Update target packages
+ package_generate_rpm_conf_common "${RPMCONF_TARGET_BASE}" base_archs ml_archs
- def fix_dep_versions(varname):
- depends = bb.utils.explode_dep_versions(bb.data.getVar(varname, d, True) or "")
- newdeps = []
- for dep in depends:
- ver = depends[dep]
- if dep and ver:
- if '-' in ver:
- subd = read_subpkgdata_dict(dep, d)
- pv = subd['PV']
- reppv = pv.replace('-', '+')
- ver = ver.replace(pv, reppv)
- newdeps.append("%s (%s)" % (dep, ver))
- elif dep:
- newdeps.append(dep)
- bb.data.setVar(varname, " ".join(newdeps), d)
+ # Update SDK packages
+ package_generate_rpm_conf_common "${RPMCONF_HOST_BASE}" base_archs
+}
- fix_dep_versions('RDEPENDS')
- fix_dep_versions('RRECOMMENDS')
+package_generate_rpm_conf_common() {
+ rpmconf_base="$1"
+ shift
- bb.build.exec_func("mapping_rename_hook", d)
+ printf "_solve_dbpath " > ${rpmconf_base}.macro
+ o_colon="false"
- def write_dep_field(varname, outstring):
- depends = bb.utils.explode_dep_versions(bb.data.getVar(varname, d, True) or "")
- for dep in depends:
- ver = depends[dep]
- if dep and ver:
- fd.write("%s: %s %s\n" % (outstring, dep, ver))
- elif dep:
- fd.write("%s: %s\n" % (outstring, dep))
+ for archvar in "$@"; do
+ printf "_solve_dbpath " > ${rpmconf_base}-${archvar}.macro
+ colon="false"
+ for each in `cat ${rpmconf_base}-${archvar}.conf` ; do
+ if [ "$o_colon" = "true" ]; then
+ printf ":" >> ${rpmconf_base}.macro
+ fi
+ if [ "$colon" = "true" ]; then
+ printf ":" >> ${rpmconf_base}-${archvar}.macro
+ fi
+ printf "%s" $each >> ${rpmconf_base}.macro
+ o_colon="true"
+ printf "%s" $each >> ${rpmconf_base}-${archvar}.macro
+ colon="true"
+ done
+ printf "\n" >> ${rpmconf_base}-${archvar}.macro
+ done
+ printf "\n" >> ${rpmconf_base}.macro
+}
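+
+# For illustration: with solvedb paths /a/solvedb and /b/solvedb listed in the
+# .conf file, the macro files generated above each contain a single line:
+#   _solve_dbpath /a/solvedb:/b/solvedb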
- write_dep_field('RDEPENDS', 'Requires')
- write_dep_field('RRECOMMENDS', 'Recommends')
+rpm_log_check() {
+ target="$1"
+ lf_path="$2"
- fd.write("Summary\t: .\n")
+ lf_txt="`cat $lf_path`"
+ for keyword_die in "Cannot find package" "exit 1" ERR Fail
+ do
+ if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
+ then
+ echo "log_check: There were error messages in the logfile"
+ echo -e "log_check: Matched keyword: [$keyword_die]\n"
+ echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
+ echo ""
+ do_exit=1
+ fi
+ done
+ test "$do_exit" = 1 && exit 1
+ true
+}
- for var in out_vartranslate.keys():
- if out_vartranslate[var][0] != "%":
- continue
- val = bb.data.getVar(var, d)
- if val:
- fd.write(out_vartranslate[var] + "\n")
- fd.write(val + "\n\n")
- fd.write("%files\n")
- for file in files:
- if file[0] != '/':
- fd.write('/')
- fd.write("%s\n" % file)
+#
+# Resolve package names to filepaths
+# resolve_package_rpm <solvedb conffile> <pkgname>...
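+# e.g. (illustrative): pkg=`resolve_package_rpm ${RPMCONF_TARGET_BASE}-base_archs.conf busybox`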
+#
+resolve_package_rpm () {
+ local conffile="$1"
+ shift
+ local pkg_name=""
+ for solve in `cat ${conffile}`; do
+ pkg_name=$(${RPM} -D "_dbpath $solve" -D "__dbi_txn create nofsync" -q --yaml $@ | grep -i 'Packageorigin' | cut -d : -f 2)
+ if [ -n "$pkg_name" ]; then
+ break;
+ fi
+ done
+ echo $pkg_name
+}
- fd.close()
+# rpm common command and options
+rpm_common_comand () {
- # call out rpm -bb on the .spec, thereby creating an rpm
+ local target_rootfs="${INSTALL_ROOTFS_RPM}"
- bb.note(bb.data.expand("${RPMBUILD} -bb ${OUTSPECFILE}", d))
+ ${RPM} --root ${target_rootfs} \
+ --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_var ${localstatedir}" \
+ -D "_dbpath ${rpmlibdir}" \
+ -D "_tmppath /install/tmp" \
+ --noparentdirs --nolinktos \
+ -D "__dbi_txn create nofsync private" \
+ -D "_cross_scriptlet_wrapper ${WORKDIR}/scriptlet_wrapper" $@
+}
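+
+# Usage sketch: extra arguments pass straight through to ${RPM}, e.g.
+# (hypothetical manifest path):
+#   rpm_common_command --replacepkgs -Uhv ${INSTALL_ROOTFS_RPM}/install/total_solution.manifest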
- bb.data.setVar('BUILDSPEC', "${RPMBUILD} -bb ${OUTSPECFILE}\n", d)
- bb.data.setVarFlag('BUILDSPEC', 'func', '1', d)
- bb.build.exec_func('BUILDSPEC', d)
+# install or remove the pkg
+rpm_update_pkg () {
- # move the rpm into the pkgoutdir
- rpm = bb.data.expand('${RPMBUILDPATH}/RPMS/${TARGET_ARCH}/${PKG}-${RPMPV}-${PR}.${TARGET_ARCH}.rpm', d)
- outrpm = bb.data.expand('${DEPLOY_DIR_RPM}/${PACKAGE_ARCH}/${PKG}-${RPMPV}-${PR}.${TARGET_ARCH}.rpm', d)
- bb.movefile(rpm, outrpm)
+ manifest=$1
+ btmanifest=$manifest.bt
+ local target_rootfs="${INSTALL_ROOTFS_RPM}"
+
+ # Save each rpm's build time for incremental image generation; the file
+ # will be moved to ${T}
+ rm -f $btmanifest
+ for i in `cat $manifest`; do
+ # Use "rpm" rather than "${RPM}" here, since we don't need the
+ # '--dbpath' option
+ echo "$i `rpm -qp --qf '%{BUILDTIME}\n' $i`" >> $btmanifest
+ done
+
+ # Only install the different pkgs if incremental image generation is set
+ if [ "${INC_RPM_IMAGE_GEN}" = "1" -a -f ${T}/total_solution_bt.manifest -a \
+ "${IMAGE_PKGTYPE}" = "rpm" ]; then
+ cur_list="$btmanifest"
+ pre_list="${T}/total_solution_bt.manifest"
+ sort -u $cur_list -o $cur_list
+ sort -u $pre_list -o $pre_list
+ comm -1 -3 $cur_list $pre_list | sed 's#.*/\(.*\)\.rpm .*#\1#' > \
+ ${target_rootfs}/install/remove.manifest
+ comm -2 -3 $cur_list $pre_list | awk '{print $1}' > \
+ ${target_rootfs}/install/incremental.manifest
+
+ # Attempt to remove unwanted pkgs; the scripts (pre, post, etc.) have not
+ # been run yet, so we don't have to run them (preun, postun, etc.) when
+ # erasing the pkg
+ if [ -s ${target_rootfs}/install/remove.manifest ]; then
+ rpm_common_command --noscripts --nodeps \
+ -e `cat ${target_rootfs}/install/remove.manifest`
+ fi
+
+ # Attempt to install the incremental pkgs
+ rpm_common_command --nodeps --replacefiles --replacepkgs \
+ -Uvh ${target_rootfs}/install/incremental.manifest
+ else
+ # Attempt to install
+ rpm_common_command --replacepkgs -Uhv $manifest
+ fi
}
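+
+# Sketch of the incremental diff performed above: with both build-time
+# manifests sorted,
+#   comm -1 -3 cur pre  ->  entries only in the previous image (to remove)
+#   comm -2 -3 cur pre  ->  entries only in the new solution (to install)
+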
+#
+# install a bunch of packages using rpm
+# the following shell variables need to be set before calling this func:
+# INSTALL_ROOTFS_RPM - install root dir
+# INSTALL_PLATFORM_RPM - main platform
+# INSTALL_PLATFORM_EXTRA_RPM - extra platform
+# INSTALL_CONFBASE_RPM - configuration file base name
+# INSTALL_PACKAGES_RPM - packages to be installed
+# INSTALL_PACKAGES_ATTEMPTONLY_RPM - packages to attempt to install (failures are not fatal)
+# INSTALL_PACKAGES_LINGUAS_RPM - additional linguas packages (skipped for uclibc)
+# INSTALL_PROVIDENAME_RPM - content for provide name
+# INSTALL_TASK_RPM - task name
+
+package_install_internal_rpm () {
+
+ local target_rootfs="${INSTALL_ROOTFS_RPM}"
+ local platform="${INSTALL_PLATFORM_RPM}"
+ local platform_extra="${INSTALL_PLATFORM_EXTRA_RPM}"
+ local confbase="${INSTALL_CONFBASE_RPM}"
+ local package_to_install="${INSTALL_PACKAGES_RPM}"
+ local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_RPM}"
+ local package_linguas="${INSTALL_PACKAGES_LINGUAS_RPM}"
+ local providename="${INSTALL_PROVIDENAME_RPM}"
+ local task="${INSTALL_TASK_RPM}"
+
+ # Setup base system configuration
+ mkdir -p ${target_rootfs}/etc/rpm/
+ echo "${platform}${TARGET_VENDOR}-${TARGET_OS}" > ${target_rootfs}/etc/rpm/platform
+ if [ ! -z "$platform_extra" ]; then
+ for pt in $platform_extra ; do
+ case $pt in
+ noarch | any | all)
+ os="`echo ${TARGET_OS} | sed "s,-.*,,"`.*"
+ ;;
+ *)
+ os="${TARGET_OS}"
+ ;;
+ esac
+ echo "$pt-.*-$os" >> ${target_rootfs}/etc/rpm/platform
+ done
+ fi
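+
+ # Illustrative resulting ${target_rootfs}/etc/rpm/platform (hypothetical
+ # tune/vendor/os values):
+ #   armv5te-poky-linux-gnueabi
+ #   arm-.*-linux-gnueabi
+ #   noarch-.*-linux.*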
+
+ # Tell RPM that the "/" directory exists and is available
+ mkdir -p ${target_rootfs}/etc/rpm/sysinfo
+ echo "/" >${target_rootfs}/etc/rpm/sysinfo/Dirnames
+ if [ ! -z "$providename" ]; then
+ cat /dev/null > ${target_rootfs}/etc/rpm/sysinfo/Providename
+ for provide in $providename ; do
+ echo $provide >> ${target_rootfs}/etc/rpm/sysinfo/Providename
+ done
+ fi
+
+ # Setup manifest of packages to install...
+ mkdir -p ${target_rootfs}/install
+ echo "# Install manifest" > ${target_rootfs}/install/install.manifest
+
+ # Uclibc builds don't provide this stuff...
+ if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
+ if [ ! -z "${package_linguas}" ]; then
+ for pkg in ${package_linguas}; do
+ echo "Processing $pkg..."
+
+ archvar=base_archs
+ manifest=install.manifest
+ ml_prefix=`echo ${pkg} | cut -d'-' -f1`
+ ml_pkg=$pkg
+ for i in ${MULTILIB_PREFIX_LIST} ; do
+ if [ ${ml_prefix} = ${i} ]; then
+ ml_pkg=$(echo ${pkg} | sed "s,^${ml_prefix}-\(.*\),\1,")
+ archvar=ml_archs
+ manifest=install_multilib.manifest
+ break
+ fi
+ done
+
+ pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${ml_pkg})
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg ($ml_pkg)!"
+ exit 1
+ fi
+ echo $pkg_name >> ${target_rootfs}/install/${manifest}
+ done
+ fi
+ fi
+ if [ ! -z "${package_to_install}" ]; then
+ for pkg in ${package_to_install} ; do
+ echo "Processing $pkg..."
+
+ archvar=base_archs
+ manifest=install.manifest
+ ml_prefix=`echo ${pkg} | cut -d'-' -f1`
+ ml_pkg=$pkg
+ for i in ${MULTILIB_PREFIX_LIST} ; do
+ if [ ${ml_prefix} = ${i} ]; then
+ ml_pkg=$(echo ${pkg} | sed "s,^${ml_prefix}-\(.*\),\1,")
+ archvar=ml_archs
+ manifest=install_multilib.manifest
+ break
+ fi
+ done
-rpm_prep() {
- if [ ! -e ${WORKDIR}/rpmrc ]; then
- mkdir -p ${RPMBUILDPATH}/{SPECS,RPMS/{i386,i586,i686,noarch,ppc,mips,mipsel,arm},SRPMS,SOURCES,BUILD}
- echo 'macrofiles:${STAGING_DIR_NATIVE}/usr/lib/rpm/macros:${WORKDIR}/macros' > ${WORKDIR}/rpmrc
- echo '%_topdir ${RPMBUILDPATH}' > ${WORKDIR}/macros
- echo '%_repackage_dir ${WORKDIR}' >> ${WORKDIR}/macros
+ pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${ml_pkg})
+ if [ -z "$pkg_name" ]; then
+ echo "Unable to find package $pkg ($ml_pkg)!"
+ exit 1
+ fi
+ echo $pkg_name >> ${target_rootfs}/install/${manifest}
+ done
fi
+
+ # Normal package installation
+
+ # Generate an install solution by doing a --justdb install, then recreate it with
+ # an actual package install!
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}-base_archs.macro`" \
+ -D "__dbi_txn create nofsync" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ ${target_rootfs}/install/install.manifest
+
+ if [ ! -z "${package_attemptonly}" ]; then
+ echo "Adding attempt only packages..."
+ for pkg in ${package_attemptonly} ; do
+ echo "Processing $pkg..."
+ archvar=base_archs
+ ml_prefix=`echo ${pkg} | cut -d'-' -f1`
+ ml_pkg=$pkg
+ for i in ${MULTILIB_PREFIX_LIST} ; do
+ if [ ${ml_prefix} = ${i} ]; then
+ ml_pkg=$(echo ${pkg} | sed "s,^${ml_prefix}-\(.*\),\1,")
+ archvar=ml_archs
+ break
+ fi
+ done
+
+ pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${ml_pkg})
+ if [ -z "$pkg_name" ]; then
+ echo "Note: Unable to find package $pkg ($ml_pkg) -- PACKAGE_INSTALL_ATTEMPTONLY"
+ continue
+ fi
+ echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}"
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ $pkg_name >> "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
+ done
+ fi
+
+ #### Note: 'Recommends' is an arbitrary tag that means _SUGGESTS_ in OE-core.
+ # Add any recommended packages to the image
+ # RPM does not solve for recommended packages because they are optional...
+ # So we query them and treat them like the ATTEMPTONLY packages above...
+ # Set loop to "1" to enable this code...
+ loop=0
+ if [ $loop -eq 1 ]; then
+ echo "Processing recommended packages..."
+ cat /dev/null > ${target_rootfs}/install/recommend.list
+ while [ $loop -eq 1 ]; do
+ # Dump the full set of recommends...
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -qa --qf "[%{RECOMMENDS}\n]" | sort -u > ${target_rootfs}/install/recommend
+ # Did we add more to the list?
+ grep -v -x -F -f ${target_rootfs}/install/recommend.list ${target_rootfs}/install/recommend > ${target_rootfs}/install/recommend.new || true
+ # We don't want to loop unless there is a change to the list!
+ loop=0
+ # Read from the file directly (not via a pipe) so that setting
+ # loop=1 inside the loop is not lost in a subshell
+ while read pkg ; do
+ # Oh, there was a new one; we'll need to loop again...
+ loop=1
+ echo "Processing $pkg..."
+ found=0
+ for archvar in base_archs ml_archs ; do
+ pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${pkg})
+ if [ -n "$pkg_name" ]; then
+ found=1
+ break
+ fi
+ done
+
+ if [ $found -eq 0 ]; then
+ echo "Note: Unable to find package $pkg -- suggests"
+ echo "Unable to find package $pkg." >> "${WORKDIR}/temp/log.do_${task}_recommend.${PID}"
+ continue
+ fi
+ echo "Attempting $pkg_name..." >> "${WORKDIR}/temp/log.do_{task}_recommend.${PID}"
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync private" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ $pkg_name >> "${WORKDIR}/temp/log.do_${task}_recommend.${PID}" 2>&1 || true
+ done < ${target_rootfs}/install/recommend.new
+ cat ${target_rootfs}/install/recommend.list ${target_rootfs}/install/recommend.new | sort -u > ${target_rootfs}/install/recommend.new.list
+ mv -f ${target_rootfs}/install/recommend.new.list ${target_rootfs}/install/recommend.list
+ rm ${target_rootfs}/install/recommend ${target_rootfs}/install/recommend.new
+ done
+ fi
+
+ # Now that we have a solution, pull out a list of what to install...
+ echo "Manifest: ${target_rootfs}/install/install.manifest"
+ ${RPM} -D "_dbpath ${target_rootfs}/install" -qa --yaml \
+ -D "__dbi_txn create nofsync private" \
+ | grep -i 'Packageorigin' | cut -d : -f 2 > ${target_rootfs}/install/install_solution.manifest
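+ # Each --yaml entry carries a line like (illustrative):
+ #   Packageorigin: ${DEPLOY_DIR_RPM}/armv5te/bash-4.1-r0.armv5te.rpm
+ # which is what gets collected into the solution manifests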
+
+ touch ${target_rootfs}/install/install_multilib_solution.manifest
+
+ if [ -e "${target_rootfs}/install/install_multilib.manifest" ]; then
+ # multilib package installation
+
+ # Generate an install solution by doing a --justdb install, then recreate it with
+ # an actual package install!
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}-ml_archs.macro`" \
+ -D "__dbi_txn create nofsync" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ ${target_rootfs}/install/install_multilib.manifest
+
+ # Now that we have a solution, pull out a list of what to install...
+ echo "Manifest: ${target_rootfs}/install/install_multilib.manifest"
+ ${RPM} -D "_dbpath ${target_rootfs}/install" -qa --yaml \
+ -D "__dbi_txn create nofsync private" \
+ | grep -i 'Packageorigin' | cut -d : -f 2 > ${target_rootfs}/install/install_multilib_solution.manifest
+
+ fi
+
+ cat ${target_rootfs}/install/install_solution.manifest > ${target_rootfs}/install/total_solution.manifest
+ cat ${target_rootfs}/install/install_multilib_solution.manifest >> ${target_rootfs}/install/total_solution.manifest
+
+ # Construct install scriptlet wrapper
+ cat << EOF > ${WORKDIR}/scriptlet_wrapper
+#!/bin/bash
+
+export PATH="${PATH}"
+export D="${target_rootfs}"
+export OFFLINE_ROOT="\$D"
+export IPKG_OFFLINE_ROOT="\$D"
+export OPKG_OFFLINE_ROOT="\$D"
+
+\$2 \$1/\$3 \$4
+if [ \$? -ne 0 ]; then
+ mkdir -p \$1/etc/rpm-postinsts
+ num=100
+ while [ -e \$1/etc/rpm-postinsts/\${num} ]; do num=\$((num + 1)); done
+ echo "#!\$2" > \$1/etc/rpm-postinsts/\${num}
+ echo "# Arg: \$4" >> \$1/etc/rpm-postinsts/\${num}
+ cat \$1/\$3 >> \$1/etc/rpm-postinsts/\${num}
+ chmod +x \$1/etc/rpm-postinsts/\${num}
+fi
+EOF
+
+ chmod 0755 ${WORKDIR}/scriptlet_wrapper
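+
+ # The wrapper's arguments (per the _cross_scriptlet_wrapper convention
+ # assumed above) are: $1 = install root, $2 = interpreter, $3 = scriptlet
+ # path relative to the root, $4 = scriptlet arg, e.g. (illustrative):
+ #   ${WORKDIR}/scriptlet_wrapper ${target_rootfs} /bin/sh var/tmp/rpm-tmp.1234 1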
+
+ # RPM is special. It can't handle dependencies and preinstall scripts correctly. It's
+ # probably a feature. The only way to convince rpm to actually run the preinstall scripts
+ # for base-passwd and shadow first, before installing packages that depend on those
+ # packages, is to do two image installs: one set of packages first, then the other.
+ if [ "${INC_RPM_IMAGE_GEN}" = "1" -a -f ${T}/total_solution_bt.manifest ]; then
+ echo "Skipping pre install due to exisitng image"
+ else
+ echo "# Initial Install manifest" > ${target_rootfs}/install/initial_install.manifest
+ echo "Installing base dependencies first (base-passwd, base-files and shadow) since rpm is special"
+ grep /base-passwd-[0-9] ${target_rootfs}/install/total_solution.manifest >> ${target_rootfs}/install/initial_install.manifest || true
+ grep /base-files-[0-9] ${target_rootfs}/install/total_solution.manifest >> ${target_rootfs}/install/initial_install.manifest || true
+ grep /shadow-[0-9] ${target_rootfs}/install/total_solution.manifest >> ${target_rootfs}/install/initial_install.manifest || true
+
+ # Generate an install solution by doing a --justdb install, then recreate it with
+ # an actual package install!
+ mkdir -p ${target_rootfs}/initial
+
+ ${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
+ --predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
+ -D "_dbpath ${target_rootfs}/initial" -D "`cat ${confbase}.macro`" \
+ -D "__dbi_txn create nofsync" \
+ -U --justdb --noscripts --notriggers --noparentdirs --nolinktos --ignoresize \
+ ${target_rootfs}/install/initial_install.manifest
+
+ ${RPM} -D "_dbpath ${target_rootfs}/initial" -qa --yaml \
+ -D "__dbi_txn create nofsync private" \
+ | grep -i 'Packageorigin' | cut -d : -f 2 > ${target_rootfs}/install/initial_solution.manifest
+
+ rpm_update_pkg ${target_rootfs}/install/initial_solution.manifest
+
+ grep -Fv -f ${target_rootfs}/install/initial_solution.manifest ${target_rootfs}/install/total_solution.manifest > ${target_rootfs}/install/total_solution.manifest.new
+ mv ${target_rootfs}/install/total_solution.manifest.new ${target_rootfs}/install/total_solution.manifest
+
+ rm -rf ${target_rootfs}/initial
+ fi
+
+ echo "Installing main solution manifest (${target_rootfs}/install/total_solution.manifest)"
+
+ rpm_update_pkg ${target_rootfs}/install/total_solution.manifest
}
-python do_package_rpm () {
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined, unable to package")
- return
+python write_specfile () {
+ import textwrap
+ import oe.packagedata
+
+ # append information for logs and patches to %prep
+ def add_prep(d,spec_files_bottom):
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ spec_files_bottom.append('%%prep -n %s' % d.getVar('PN', True) )
+ spec_files_bottom.append('%s' % "echo \"Logs and patches are included; please check them in SOURCES\"")
+ spec_files_bottom.append('')
+
+ # get the names of the tarballs for sources, patches and logs
+ def get_tarballs(d):
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ return get_package(d)
+
+ # append the tarball names to the 'Source' keywords in the .spec file.
+ def tail_source(d,source_list=[],patch_list=None):
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ source_number = 0
+ patch_number = 0
+ for source in source_list:
+ spec_preamble_top.append('Source' + str(source_number) + ': %s' % source)
+ source_number += 1
+ if patch_list:
+ for patch in patch_list:
+ print_deps(patch, "Patch" + str(patch_number), spec_preamble_top, d)
+ patch_number += 1
+ # We need a simple way to remove the MLPREFIX from the package name,
+ # and dependency information...
+ def strip_multilib(name, d):
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') >= 0:
+ name = "".join(name.split(eext[1] + '-'))
+ return name
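+ # e.g. (illustrative, assuming MULTILIBS contains "multilib:lib32"):
+ #   strip_multilib("lib32-bash", d) -> "bash"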
+
+# ml = d.getVar("MLPREFIX", True)
+# if ml and name and len(ml) != 0 and name.find(ml) == 0:
+# return ml.join(name.split(ml, 1)[1:])
+# return name
+
+ # In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
+ # This format is similar to OE, however there are restrictions on the
+ # characters that can be in a field. In the Version field, "-"
+ # characters are not allowed. "-" is allowed in the Release field.
+ #
+ # We translate the "-" in the version to a "+", by loading the PKGV
+ # from the dependent recipe, replacing the - with a +, and then using
+ # that value to do a replace inside of this recipe's dependencies.
+ # This preserves the "-" separator between the version and release, as
+ # well as any "-" characters inside of the release field.
+ #
+ # All of this has to happen BEFORE the mapping_rename_hook as
+ # after renaming we cannot look up the dependencies in the packagedata
+ # store.
+ def translate_vers(varname, d):
+ depends = d.getVar(varname, True)
+ if depends:
+ depends_dict = bb.utils.explode_dep_versions(depends)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ ver = depends_dict[dep]
+ if dep and ver:
+ if '-' in ver:
+ subd = oe.packagedata.read_subpkgdata_dict(dep, d)
+ if 'PKGV' in subd:
+ pv = subd['PKGV']
+ reppv = pv.replace('-', '+')
+ ver = ver.replace(pv, reppv)
+ newdeps_dict[dep] = ver
+ depends = bb.utils.join_deps(newdeps_dict)
+ d.setVar(varname, depends.strip())
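+ # e.g. (illustrative): if the dependent package's PKGV is "2.0-rc1", a
+ # dependency "foo (>= 2.0-rc1-r0)" becomes "foo (>= 2.0+rc1-r0)", keeping
+ # the version-release separator intact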
- import os # path manipulations
- outdir = bb.data.getVar('DEPLOY_DIR_RPM', d, 1)
- if not outdir:
- bb.error("DEPLOY_DIR_RPM not defined, unable to package")
+ # We need to change the style of the dependencies from BB to RPM
+ # This needs to happen AFTER the mapping_rename_hook
+ def print_deps(variable, tag, array, d):
+ depends = variable
+ if depends:
+ depends_dict = bb.utils.explode_dep_versions(depends)
+ for dep in depends_dict:
+ ver = depends_dict[dep]
+ if dep and ver:
+ ver = ver.replace('(', '')
+ ver = ver.replace(')', '')
+ array.append("%s: %s %s" % (tag, dep, ver))
+ else:
+ array.append("%s: %s" % (tag, dep))
+
+ def walk_files(walkpath, target, conffiles):
+ import os
+ for rootpath, dirs, files in os.walk(walkpath):
+ path = rootpath.replace(walkpath, "")
+ for dir in dirs:
+ # All packages own the directories their files are in...
+ target.append('%dir "' + path + '/' + dir + '"')
+ for file in files:
+ if conffiles.count(path + '/' + file):
+ target.append('%config "' + path + '/' + file + '"')
+ else:
+ target.append('"' + path + '/' + file + '"')
+
+ # Prevent the prerm/postrm scripts from being run during an upgrade
+ def wrap_uninstall(scriptvar):
+ scr = scriptvar.strip()
+ if scr.startswith("#!"):
+ pos = scr.find("\n") + 1
+ else:
+ pos = 0
+ scr = scr[:pos] + 'if [ "$1" = "0" ] ; then\n' + scr[pos:] + '\nfi'
+ return scr
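+ # e.g. (illustrative): wrap_uninstall("#!/bin/sh\nrm -f /etc/foo") guards
+ # the body with 'if [ "$1" = "0" ]' so it only runs on full removal
+ # ($1 = 0), not on upgrade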
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
return
- bb.mkdirhier(outdir)
- packages = bb.data.getVar('PACKAGES', d, 1)
- if not packages:
- bb.debug(1, "PACKAGES not defined, nothing to package")
+ pkgdest = d.getVar('PKGDEST', True)
+ if not pkgdest:
+ bb.fatal("No PKGDEST")
return
- if packages == []:
- bb.debug(1, "No packages; nothing to do")
+ outspecfile = d.getVar('OUTSPECFILE', True)
+ if not outspecfile:
+ bb.fatal("No OUTSPECFILE")
return
- # If "rpm" comes into overrides the presence of this function causes problems.
- # Since we don't need it, remove it for now - hacky.
- bb.data.delVar("do_package_write_rpm", d)
+ # Construct the SPEC file...
+ srcname = strip_multilib(d.getVar('PN', True), d)
+ srcsummary = (d.getVar('SUMMARY', True) or d.getVar('DESCRIPTION', True) or ".")
+ srcversion = d.getVar('PKGV', True).replace('-', '+')
+ srcrelease = d.getVar('PKGR', True)
+ srcepoch = (d.getVar('PKGE', True) or "")
+ srclicense = d.getVar('LICENSE', True)
+ srcsection = d.getVar('SECTION', True)
+ srcmaintainer = d.getVar('MAINTAINER', True)
+ srchomepage = d.getVar('HOMEPAGE', True)
+ srcdescription = d.getVar('DESCRIPTION', True) or "."
+
+ srcdepends = strip_multilib(d.getVar('DEPENDS', True), d)
+ srcrdepends = []
+ srcrrecommends = []
+ srcrsuggests = []
+ srcrprovides = []
+ srcrreplaces = []
+ srcrconflicts = []
+ srcrobsoletes = []
+
+ srcpreinst = []
+ srcpostinst = []
+ srcprerm = []
+ srcpostrm = []
+
+ spec_preamble_top = []
+ spec_preamble_bottom = []
+
+ spec_scriptlets_top = []
+ spec_scriptlets_bottom = []
+
+ spec_files_top = []
+ spec_files_bottom = []
for pkg in packages.split():
localdata = bb.data.createCopy(d)
- pkgdest = bb.data.getVar('PKGDEST', d, 1)
+
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
- bb.data.setVar('ROOT', '', localdata)
- bb.data.setVar('ROOT_%s' % pkg, root, localdata)
- pkgname = bb.data.getVar('PKG_%s' % pkg, localdata, 1)
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
- bb.data.setVar('PKG', pkgname, localdata)
+ localdata.setVar('PKG', pkgname)
- overrides = bb.data.getVar('OVERRIDES', localdata)
- if not overrides:
- raise bb.build.FuncFailed('OVERRIDES not defined')
- overrides = bb.data.expand(overrides, localdata)
- bb.data.setVar('OVERRIDES', overrides + ':' + pkg, localdata)
+ localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
- basedir = os.path.join(os.path.dirname(root))
- pkgoutdir = os.path.join(outdir, bb.data.getVar('PACKAGE_ARCH', localdata, 1))
- bb.mkdirhier(pkgoutdir)
- bb.data.setVar('OUTSPECFILE', os.path.join(workdir, "%s.spec" % pkg), localdata)
- # Save the value of RPMBUILD expanded into the new dictonary so any
- # changes in the compoents that make up workdir don't break packaging
- bb.data.setVar('RPMBUILD', bb.data.getVar("RPMBUILD", d, True), localdata)
- bb.data.setVar('RPMBUILDPATH', bb.data.getVar("RPMBUILDPATH", d, True), localdata)
- bb.build.exec_func('write_specfile', localdata)
+
+ conffiles = (localdata.getVar('CONFFILES', True) or "").split()
+
+ splitname = strip_multilib(pkgname, d)
+
+ splitsummary = (localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or ".")
+ splitversion = (localdata.getVar('PKGV', True) or "").replace('-', '+')
+ splitrelease = (localdata.getVar('PKGR', True) or "")
+ splitepoch = (localdata.getVar('PKGE', True) or "")
+ splitlicense = (localdata.getVar('LICENSE', True) or "")
+ splitsection = (localdata.getVar('SECTION', True) or "")
+ splitdescription = (localdata.getVar('DESCRIPTION', True) or ".")
+
+ translate_vers('RDEPENDS', localdata)
+ translate_vers('RRECOMMENDS', localdata)
+ translate_vers('RSUGGESTS', localdata)
+ translate_vers('RPROVIDES', localdata)
+ translate_vers('RREPLACES', localdata)
+ translate_vers('RCONFLICTS', localdata)
+
+ # Map the dependencies into their final form
+ mapping_rename_hook(localdata)
+
+ splitrdepends = strip_multilib(localdata.getVar('RDEPENDS', True), d) or ""
+ splitrrecommends = strip_multilib(localdata.getVar('RRECOMMENDS', True), d) or ""
+ splitrsuggests = strip_multilib(localdata.getVar('RSUGGESTS', True), d) or ""
+ splitrprovides = strip_multilib(localdata.getVar('RPROVIDES', True), d) or ""
+ splitrreplaces = strip_multilib(localdata.getVar('RREPLACES', True), d) or ""
+ splitrconflicts = strip_multilib(localdata.getVar('RCONFLICTS', True), d) or ""
+ splitrobsoletes = []
+
+ # For now we need to manually supplement RPROVIDES with any update-alternatives links
+ if pkg == d.getVar("PN", True):
+ splitrprovides = splitrprovides + " " + (d.getVar('ALTERNATIVE_LINK', True) or '') + " " + (d.getVar('ALTERNATIVE_LINKS', True) or '')
+
+ # Gather special src/first package data
+ if srcname == splitname:
+ srcrdepends = splitrdepends
+ srcrrecommends = splitrrecommends
+ srcrsuggests = splitrsuggests
+ srcrprovides = splitrprovides
+ srcrreplaces = splitrreplaces
+ srcrconflicts = splitrconflicts
+
+ srcpreinst = localdata.getVar('pkg_preinst', True)
+ srcpostinst = localdata.getVar('pkg_postinst', True)
+ srcprerm = localdata.getVar('pkg_prerm', True)
+ srcpostrm = localdata.getVar('pkg_postrm', True)
+
+ file_list = []
+ walk_files(root, file_list, conffiles)
+ if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
+ bb.note("Not creating empty RPM package for %s" % splitname)
+ else:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_top.append('%files')
+ spec_files_top.append('%defattr(-,-,-,-)')
+ if file_list:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_top.extend(file_list)
+ else:
+ bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ spec_files_top.append('')
+
+ bb.utils.unlockfile(lf)
+ continue
+
+ # Process subpackage data
+ spec_preamble_bottom.append('%%package -n %s' % splitname)
+ spec_preamble_bottom.append('Summary: %s' % splitsummary)
+ if srcversion != splitversion:
+ spec_preamble_bottom.append('Version: %s' % splitversion)
+ if srcrelease != splitrelease:
+ spec_preamble_bottom.append('Release: %s' % splitrelease)
+ if srcepoch != splitepoch:
+ spec_preamble_bottom.append('Epoch: %s' % splitepoch)
+ if srclicense != splitlicense:
+ spec_preamble_bottom.append('License: %s' % splitlicense)
+ spec_preamble_bottom.append('Group: %s' % splitsection)
+
+ # Replaces == Obsoletes && Provides
+ if splitrreplaces and splitrreplaces.strip() != "":
+ for dep in splitrreplaces.split(','):
+ if splitrprovides:
+ splitrprovides = splitrprovides + ", " + dep
+ else:
+ splitrprovides = dep
+ if splitrobsoletes:
+ splitrobsoletes = splitrobsoletes + ", " + dep
+ else:
+ splitrobsoletes = dep
+
+ print_deps(splitrdepends, "Requires", spec_preamble_bottom, d)
+ # Suggests in RPM are like recommends in OE-core!
+ print_deps(splitrrecommends, "Suggests", spec_preamble_bottom, d)
+ # There is no RPM analog for OE suggests, so emit them as Recommends for now
+ print_deps(splitrsuggests, "Recommends", spec_preamble_bottom, d)
+ print_deps(splitrprovides, "Provides", spec_preamble_bottom, d)
+ print_deps(splitrobsoletes, "Obsoletes", spec_preamble_bottom, d)
+
+ # A conflict cannot also be a provide! We need to filter those out.
+ if splitrconflicts:
+ depends_dict = bb.utils.explode_dep_versions(splitrconflicts)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ if dep not in splitrprovides:
+ newdeps_dict[dep] = depends_dict[dep]
+ if newdeps_dict:
+ splitrconflicts = bb.utils.join_deps(newdeps_dict)
+ else:
+ splitrconflicts = ""
+
+ print_deps(splitrconflicts, "Conflicts", spec_preamble_bottom, d)
+
+ spec_preamble_bottom.append('')
+
+ spec_preamble_bottom.append('%%description -n %s' % splitname)
+ dedent_text = textwrap.dedent(splitdescription).strip()
+ spec_preamble_bottom.append('%s' % textwrap.fill(dedent_text, width=75))
+
+ spec_preamble_bottom.append('')
+
+ # Now process scriptlets
+ for script in ["preinst", "postinst", "prerm", "postrm"]:
+ scriptvar = localdata.getVar('pkg_%s' % script, True)
+ if not scriptvar:
+ continue
+ if script == 'preinst':
+ spec_scriptlets_bottom.append('%%pre -n %s' % splitname)
+ elif script == 'postinst':
+ spec_scriptlets_bottom.append('%%post -n %s' % splitname)
+ elif script == 'prerm':
+ spec_scriptlets_bottom.append('%%preun -n %s' % splitname)
+ scriptvar = wrap_uninstall(scriptvar)
+ elif script == 'postrm':
+ spec_scriptlets_bottom.append('%%postun -n %s' % splitname)
+ scriptvar = wrap_uninstall(scriptvar)
+ spec_scriptlets_bottom.append('# %s - %s' % (splitname, script))
+ spec_scriptlets_bottom.append(scriptvar)
+ spec_scriptlets_bottom.append('')
+
+ # Now process files
+ file_list = []
+ walk_files(root, file_list, conffiles)
+ if not file_list and localdata.getVar('ALLOW_EMPTY') != "1":
+ bb.note("Not creating empty RPM package for %s" % splitname)
+ else:
+ spec_files_bottom.append('%%files -n %s' % splitname)
+ spec_files_bottom.append('%defattr(-,-,-,-)')
+ if file_list:
+ bb.note("Creating RPM package for %s" % splitname)
+ spec_files_bottom.extend(file_list)
+ else:
+ bb.note("Creating EMPTY RPM Package for %s" % splitname)
+ spec_files_bottom.append('')
+
+ del localdata
bb.utils.unlockfile(lf)
+
+ add_prep(d,spec_files_bottom)
+ spec_preamble_top.append('Summary: %s' % srcsummary)
+ spec_preamble_top.append('Name: %s' % srcname)
+ spec_preamble_top.append('Version: %s' % srcversion)
+ spec_preamble_top.append('Release: %s' % srcrelease)
+ if srcepoch and srcepoch.strip() != "":
+ spec_preamble_top.append('Epoch: %s' % srcepoch)
+ spec_preamble_top.append('License: %s' % srclicense)
+ spec_preamble_top.append('Group: %s' % srcsection)
+ spec_preamble_top.append('Packager: %s' % srcmaintainer)
+ spec_preamble_top.append('URL: %s' % srchomepage)
+ source_list = get_tarballs(d)
+ tail_source(d,source_list,None)
+
+ # Replaces == Obsoletes && Provides
+ if srcrreplaces and srcrreplaces.strip() != "":
+ for dep in srcrreplaces.split(','):
+ if srcrprovides:
+ srcrprovides = srcrprovides + ", " + dep
+ else:
+ srcrprovides = dep
+ if srcrobsoletes:
+ srcrobsoletes = srcrobsoletes + ", " + dep
+ else:
+ srcrobsoletes = dep
+
+ print_deps(srcdepends, "BuildRequires", spec_preamble_top, d)
+ print_deps(srcrdepends, "Requires", spec_preamble_top, d)
+ # Suggests in RPM are like recommends in OE-core!
+ print_deps(srcrrecommends, "Suggests", spec_preamble_top, d)
+ # There is no RPM analog for OE suggests, so emit them as Recommends for now
+ print_deps(srcrsuggests, "Recommends", spec_preamble_top, d)
+ print_deps(srcrprovides, "Provides", spec_preamble_top, d)
+ print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d)
+
+ # A conflict cannot also be a provide! We need to filter those out.
+ if srcrconflicts:
+ depends_dict = bb.utils.explode_dep_versions(srcrconflicts)
+ newdeps_dict = {}
+ for dep in depends_dict:
+ if dep not in srcrprovides:
+ newdeps_dict[dep] = depends_dict[dep]
+ if newdeps_dict:
+ srcrconflicts = bb.utils.join_deps(newdeps_dict)
+ else:
+ srcrconflicts = ""
+
+ print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d)
+
+ spec_preamble_top.append('')
+
+ spec_preamble_top.append('%description')
+ dedent_text = textwrap.dedent(srcdescription).strip()
+ spec_preamble_top.append('%s' % textwrap.fill(dedent_text, width=75))
+
+ spec_preamble_top.append('')
+
+ if srcpreinst:
+ spec_scriptlets_top.append('%pre')
+ spec_scriptlets_top.append('# %s - preinst' % srcname)
+ spec_scriptlets_top.append(srcpreinst)
+ spec_scriptlets_top.append('')
+ if srcpostinst:
+ spec_scriptlets_top.append('%post')
+ spec_scriptlets_top.append('# %s - postinst' % srcname)
+ spec_scriptlets_top.append(srcpostinst)
+ spec_scriptlets_top.append('')
+ if srcprerm:
+ spec_scriptlets_top.append('%preun')
+ spec_scriptlets_top.append('# %s - prerm' % srcname)
+ scriptvar = wrap_uninstall(srcprerm)
+ spec_scriptlets_top.append(scriptvar)
+ spec_scriptlets_top.append('')
+ if srcpostrm:
+ spec_scriptlets_top.append('%postun')
+ spec_scriptlets_top.append('# %s - postrm' % srcname)
+ scriptvar = wrap_uninstall(srcpostrm)
+ spec_scriptlets_top.append(scriptvar)
+ spec_scriptlets_top.append('')
+
+ # Write the SPEC file
+ try:
+ from __builtin__ import file
+ specfile = file(outspecfile, 'w')
+ except (IOError, OSError):
+ raise bb.build.FuncFailed("unable to open spec file for writing.")
+
+ # RPMSPEC_PREAMBLE is a way to add arbitrary text to the top
+ # of the generated spec file
+ external_preamble = d.getVar("RPMSPEC_PREAMBLE", True)
+ if external_preamble:
+ specfile.write(external_preamble + "\n")
+
+ for line in spec_preamble_top:
+ specfile.write(line + "\n")
+
+ for line in spec_preamble_bottom:
+ specfile.write(line + "\n")
+
+ for line in spec_scriptlets_top:
+ specfile.write(line + "\n")
+
+ for line in spec_scriptlets_bottom:
+ specfile.write(line + "\n")
+
+ for line in spec_files_top:
+ specfile.write(line + "\n")
+
+ for line in spec_files_bottom:
+ specfile.write(line + "\n")
+
+ specfile.close()
+}
+
+python do_package_rpm () {
+ import os
+
+ def create_srpm_dir(d):
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ clean_licenses = get_licenses(d)
+ pkgwritesrpmdir = bb.data.expand('${PKGWRITEDIRSRPM}/${PACKAGE_ARCH_EXTEND}', d)
+ pkgwritesrpmdir = pkgwritesrpmdir + '/' + clean_licenses
+ bb.mkdirhier(pkgwritesrpmdir)
+ os.chmod(pkgwritesrpmdir, 0755)
+ return pkgwritesrpmdir
+
+ # We need a simple way to remove the MLPREFIX from the package name,
+ # and dependency information...
+ def strip_multilib(name, d):
+ ml = d.getVar("MLPREFIX", True)
+ if ml and name and len(ml) != 0 and name.find(ml) >= 0:
+ return "".join(name.split(ml))
+ return name
+
+ workdir = d.getVar('WORKDIR', True)
+ outdir = d.getVar('DEPLOY_DIR_RPM', True)
+ tmpdir = d.getVar('TMPDIR', True)
+ pkgd = d.getVar('PKGD', True)
+ pkgdest = d.getVar('PKGDEST', True)
+ if not workdir or not outdir or not pkgd or not tmpdir:
+ bb.error("Variables incorrectly set, unable to package")
+ return
+
+ packages = d.getVar('PACKAGES', True)
+ if not packages or packages == '':
+ bb.debug(1, "No packages; nothing to do")
+ return
+
+ # Construct the spec file...
+ srcname = strip_multilib(d.getVar('PN', True), d)
+ outspecfile = workdir + "/" + srcname + ".spec"
+ d.setVar('OUTSPECFILE', outspecfile)
+ bb.build.exec_func('write_specfile', d)
+
+ # Construct the per-file dependency helper scripts
+ def dump_filerdeps(varname, outfile, d):
+ outfile.write("#!/usr/bin/env python\n\n")
+ outfile.write("# Dependency table\n")
+ outfile.write('deps = {\n')
+ for pkg in packages.split():
+ dependsflist_key = 'FILE' + varname + 'FLIST' + "_" + pkg
+ dependsflist = (d.getVar(dependsflist_key, True) or "")
+ for dfile in dependsflist.split():
+ key = "FILE" + varname + "_" + dfile + "_" + pkg
+ depends_dict = bb.utils.explode_dep_versions(d.getVar(key, True) or "")
+ file = dfile.replace("@underscore@", "_")
+ file = file.replace("@closebrace@", "]")
+ file = file.replace("@openbrace@", "[")
+ file = file.replace("@tab@", "\t")
+ file = file.replace("@space@", " ")
+ file = file.replace("@at@", "@")
+ outfile.write('"' + pkgd + file + '" : "')
+ for dep in depends_dict:
+ ver = depends_dict[dep]
+ if dep and ver:
+ ver = ver.replace("(","")
+ ver = ver.replace(")","")
+ outfile.write(dep + " " + ver + " ")
+ else:
+ outfile.write(dep + " ")
+ outfile.write('",\n')
+ outfile.write('}\n\n')
+ outfile.write("import sys\n")
+ outfile.write("while 1:\n")
+ outfile.write("\tline = sys.stdin.readline().strip()\n")
+ outfile.write("\tif not line:\n")
+ outfile.write("\t\tsys.exit(0)\n")
+ outfile.write("\tif line in deps:\n")
+ outfile.write("\t\tprint(deps[line] + '\\n')\n")
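+
+ # The generated helper is wired to rpmbuild below via __find_requires /
+ # __find_provides: it reads file paths on stdin and prints the recorded
+ # deps, e.g. (illustrative): echo "$PKGD/usr/bin/foo" | ./bash.requires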
+
+ # OE-core dependencies a.k.a. RPM requires
+ outdepends = workdir + "/" + srcname + ".requires"
+
+ try:
+ from __builtin__ import file
+ dependsfile = file(outdepends, 'w')
+ except (IOError, OSError):
+ raise bb.build.FuncFailed("unable to open requires file for writing.")
+
+ dump_filerdeps('RDEPENDS', dependsfile, d)
+
+ dependsfile.close()
+ os.chmod(outdepends, 0755)
+
+ # OE-core / RPM Provides
+ outprovides = workdir + "/" + srcname + ".provides"
+
+ try:
+ from __builtin__ import file
+ providesfile = file(outprovides, 'w')
+ except (IOError, OSError):
+ raise bb.build.FuncFailed("unable to open provides file for writing.")
+
+ dump_filerdeps('RPROVIDES', providesfile, d)
+
+ providesfile.close()
+ os.chmod(outprovides, 0755)
+
+ # Setup the rpmbuild arguments...
+ rpmbuild = d.getVar('RPMBUILD', True)
+ targetsys = d.getVar('TARGET_SYS', True)
+ targetvendor = d.getVar('TARGET_VENDOR', True)
+ package_arch = d.getVar('PACKAGE_ARCH', True) or ""
+ if package_arch not in "all any noarch".split():
+ ml_prefix = (d.getVar('MLPREFIX', True) or "").replace("-", "_")
+ d.setVar('PACKAGE_ARCH_EXTEND', ml_prefix + package_arch)
+ else:
+ d.setVar('PACKAGE_ARCH_EXTEND', package_arch)
+ pkgwritedir = d.expand('${PKGWRITEDIRRPM}/${PACKAGE_ARCH_EXTEND}')
+ pkgarch = d.expand('${PACKAGE_ARCH_EXTEND}${TARGET_VENDOR}-${TARGET_OS}')
+ magicfile = d.expand('${STAGING_DIR_NATIVE}${datadir_native}/misc/magic.mgc')
+ bb.mkdirhier(pkgwritedir)
+ os.chmod(pkgwritedir, 0755)
+
+ cmd = rpmbuild
+ cmd = cmd + " --nodeps --short-circuit --target " + pkgarch + " --buildroot " + pkgd
+ cmd = cmd + " --define '_topdir " + workdir + "' --define '_rpmdir " + pkgwritedir + "'"
+ cmd = cmd + " --define '_build_name_fmt %%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm'"
+ cmd = cmd + " --define '_use_internal_dependency_generator 0'"
+ cmd = cmd + " --define '__find_requires " + outdepends + "'"
+ cmd = cmd + " --define '__find_provides " + outprovides + "'"
+ cmd = cmd + " --define '_unpackaged_files_terminate_build 0'"
+ cmd = cmd + " --define 'debug_package %{nil}'"
+ cmd = cmd + " --define '_rpmfc_magic_path " + magicfile + "'"
+ cmd = cmd + " --define '_tmppath " + workdir + "'"
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ cmdsrpm = cmd + " --define '_sourcedir " + workdir + "' --define '_srcrpmdir " + create_srpm_dir(d) + "'"
+ cmdsrpm = 'fakeroot ' + cmdsrpm + " -bs " + outspecfile
+ cmd = cmd + " -bb " + outspecfile
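+
+ # Illustrative final command (hypothetical arch and paths):
+ #   rpmbuild --nodeps --short-circuit --target armv5te-poky-linux \
+ #     --buildroot ${PKGD} --define '_topdir ${WORKDIR}' ... -bb ${WORKDIR}/bash.spec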
+
+ # Build the source rpm package !
+ if d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True) and d.getVar('SOURCE_ARCHIVE_PACKAGE_TYPE', True).upper() == 'SRPM':
+ d.setVar('SBUILDSPEC', cmdsrpm + "\n")
+ d.setVarFlag('SBUILDSPEC', 'func', '1')
+ bb.build.exec_func('SBUILDSPEC', d)
+
+
+ # Build the rpm package!
+ d.setVar('BUILDSPEC', cmd + "\n")
+ d.setVarFlag('BUILDSPEC', 'func', '1')
+ bb.build.exec_func('BUILDSPEC', d)
}
python () {
- import bb
- if bb.data.getVar('PACKAGES', d, True) != '':
- deps = (bb.data.getVarFlag('do_package_write_rpm', 'depends', d) or "").split()
- deps.append('rpm-native:do_populate_staging')
- deps.append('fakeroot-native:do_populate_staging')
- bb.data.setVarFlag('do_package_write_rpm', 'depends', " ".join(deps), d)
+ if d.getVar('PACKAGES', True) != '':
+ deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
+ d.appendVarFlag('do_package_write_rpm', 'depends', deps)
+ d.setVarFlag('do_package_write_rpm', 'fakeroot', 1)
+ d.setVarFlag('do_package_write_rpm_setscene', 'fakeroot', 1)
}
+SSTATETASKS += "do_package_write_rpm"
+do_package_write_rpm[sstate-name] = "deploy-rpm"
+do_package_write_rpm[sstate-inputdirs] = "${PKGWRITEDIRRPM}"
+do_package_write_rpm[sstate-outputdirs] = "${DEPLOY_DIR_RPM}"
+# Take a shared lock, we can write multiple packages at the same time...
+# but we need to stop the rootfs/solver from running while we do...
+do_package_write_rpm[sstate-lockfile-shared] += "${DEPLOY_DIR_RPM}/rpm.lock"
+
+python do_package_write_rpm_setscene () {
+ sstate_setscene(d)
+}
+addtask do_package_write_rpm_setscene
python do_package_write_rpm () {
bb.build.exec_func("read_subpackage_metadata", d)
- bb.build.exec_func("rpm_prep", d)
bb.build.exec_func("do_package_rpm", d)
}
-do_package_write_rpm[dirs] = "${D}"
+do_package_write_rpm[dirs] = "${PKGWRITEDIRRPM}"
+do_package_write_rpm[umask] = "022"
addtask package_write_rpm before do_package_write after do_package
+PACKAGEINDEXES += "package_update_index_rpm; createrepo ${DEPLOY_DIR_RPM};"
+PACKAGEINDEXDEPS += "rpm-native:do_populate_sysroot"
+PACKAGEINDEXDEPS += "createrepo-native:do_populate_sysroot"
diff --git a/meta/classes/package_tar.bbclass b/meta/classes/package_tar.bbclass
index 876cec6cf..68b1bf0fe 100644
--- a/meta/classes/package_tar.bbclass
+++ b/meta/classes/package_tar.bbclass
@@ -3,18 +3,15 @@ inherit package
IMAGE_PKGTYPE ?= "tar"
python package_tar_fn () {
- import os
- from bb import data
- fn = os.path.join(bb.data.getVar('DEPLOY_DIR_TAR', d), "%s-%s-%s.tar.gz" % (bb.data.getVar('PKG', d), bb.data.getVar('PV', d), bb.data.getVar('PR', d)))
- fn = bb.data.expand(fn, d)
- bb.data.setVar('PKGFN', fn, d)
+ fn = os.path.join(d.getVar('DEPLOY_DIR_TAR'), "%s-%s-%s.tar.gz" % (d.getVar('PKG'), d.getVar('PKGV'), d.getVar('PKGR')))
+ fn = d.expand(fn)
+ d.setVar('PKGFN', fn)
}
python package_tar_install () {
- import os, sys
- pkg = bb.data.getVar('PKG', d, 1)
- pkgfn = bb.data.getVar('PKGFN', d, 1)
- rootfs = bb.data.getVar('IMAGE_ROOTFS', d, 1)
+ pkg = d.getVar('PKG', True)
+ pkgfn = d.getVar('PKGFN', True)
+ rootfs = d.getVar('IMAGE_ROOTFS', True)
if None in (pkg,pkgfn,rootfs):
bb.error("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
@@ -23,6 +20,7 @@ python package_tar_install () {
bb.mkdirhier(rootfs)
os.chdir(rootfs)
except OSError:
+ import sys
(type, value, traceback) = sys.exc_info()
print value
raise bb.build.FuncFailed
@@ -37,25 +35,24 @@ python package_tar_install () {
}
python do_package_tar () {
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- import os # path manipulations
- outdir = bb.data.getVar('DEPLOY_DIR_TAR', d, 1)
+ outdir = d.getVar('DEPLOY_DIR_TAR', True)
if not outdir:
bb.error("DEPLOY_DIR_TAR not defined, unable to package")
return
bb.mkdirhier(outdir)
- dvar = bb.data.getVar('D', d, 1)
+ dvar = d.getVar('D', True)
if not dvar:
bb.error("D not defined, unable to package")
return
bb.mkdirhier(dvar)
- packages = bb.data.getVar('PACKAGES', d, 1)
+ packages = d.getVar('PACKAGES', True)
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
@@ -64,29 +61,29 @@ python do_package_tar () {
localdata = bb.data.createCopy(d)
root = "%s/install/%s" % (workdir, pkg)
- bb.data.setVar('ROOT', '', localdata)
- bb.data.setVar('ROOT_%s' % pkg, root, localdata)
- bb.data.setVar('PKG', pkg, localdata)
+ localdata.setVar('ROOT', '')
+ localdata.setVar('ROOT_%s' % pkg, root)
+ localdata.setVar('PKG', pkg)
- overrides = bb.data.getVar('OVERRIDES', localdata)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
- overrides = bb.data.expand(overrides, localdata)
- bb.data.setVar('OVERRIDES', '%s:%s' % (overrides, pkg), localdata)
+ overrides = localdata.expand(overrides)
+ localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
bb.data.update_data(localdata)
- root = bb.data.getVar('ROOT', localdata)
+ root = localdata.getVar('ROOT')
bb.mkdirhier(root)
basedir = os.path.dirname(root)
pkgoutdir = outdir
bb.mkdirhier(pkgoutdir)
bb.build.exec_func('package_tar_fn', localdata)
- tarfn = bb.data.getVar('PKGFN', localdata, 1)
+ tarfn = localdata.getVar('PKGFN', True)
os.chdir(root)
from glob import glob
if not glob('*'):
- bb.note("Not creating empty archive for %s-%s-%s" % (pkg, bb.data.getVar('PV', localdata, 1), bb.data.getVar('PR', localdata, 1)))
+ bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
continue
ret = os.system("tar -czf %s %s" % (tarfn, '.'))
if ret != 0:
@@ -94,12 +91,12 @@ python do_package_tar () {
}
python () {
- import bb
- if bb.data.getVar('PACKAGES', d, True) != '':
- deps = (bb.data.getVarFlag('do_package_write_tar', 'depends', d) or "").split()
- deps.append('tar-native:do_populate_staging')
- deps.append('fakeroot-native:do_populate_staging')
- bb.data.setVarFlag('do_package_write_tar', 'depends', " ".join(deps), d)
+ if d.getVar('PACKAGES', True) != '':
+ deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
+ deps.append('tar-native:do_populate_sysroot')
+ deps.append('virtual/fakeroot-native:do_populate_sysroot')
+ d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
+ d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
}
diff --git a/meta/classes/packaged-staging.bbclass b/meta/classes/packaged-staging.bbclass
deleted file mode 100644
index 144087fed..000000000
--- a/meta/classes/packaged-staging.bbclass
+++ /dev/null
@@ -1,440 +0,0 @@
-#
-# Populate builds using prebuilt packages where possible to speed up builds
-# and allow staging to be reconstructed.
-#
-# To use it add that line to conf/local.conf:
-#
-# INHERIT += "packaged-staging"
-#
-
-
-#
-# bitbake.conf set PSTAGING_ACTIVE = "0", this class sets to "1" if we're active
-#
-PSTAGE_PKGVERSION = "${PV}-${PR}"
-PSTAGE_PKGARCH = "${BUILD_SYS}"
-PSTAGE_EXTRAPATH ?= ""
-PSTAGE_PKGPATH = "${DISTRO}/${OELAYOUT_ABI}${PSTAGE_EXTRAPATH}"
-PSTAGE_PKGPN = "${@bb.data.expand('staging-${PN}-${MULTIMACH_ARCH}${TARGET_VENDOR}-${TARGET_OS}', d).replace('_', '-')}"
-PSTAGE_PKGNAME = "${PSTAGE_PKGPN}_${PSTAGE_PKGVERSION}_${PSTAGE_PKGARCH}.ipk"
-PSTAGE_PKG = "${DEPLOY_DIR_PSTAGE}/${PSTAGE_PKGPATH}/${PSTAGE_PKGNAME}"
-
-PSTAGE_NATIVEDEPENDS = "\
- shasum-native \
- stagemanager-native \
- "
-
-BB_STAMP_WHITELIST = "${PSTAGE_NATIVEDEPENDS}"
-
-python () {
- import bb
- pstage_allowed = True
-
- # These classes encode staging paths into the binary data so can only be
- # reused if the path doesn't change/
- if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d):
- path = bb.data.getVar('PSTAGE_PKGPATH', d, 1)
- path = path + bb.data.getVar('TMPDIR', d, 1).replace('/', '-')
- bb.data.setVar('PSTAGE_PKGPATH', path, d)
-
- # PSTAGE_NATIVEDEPENDS lists the packages we need before we can use packaged
- # staging. There will always be some packages we depend on.
- if bb.data.inherits_class('native', d):
- pn = bb.data.getVar('PN', d, True)
- nativedeps = bb.data.getVar('PSTAGE_NATIVEDEPENDS', d, True).split()
- if pn in nativedeps:
- pstage_allowed = False
-
- # Images aren't of interest to us
- if bb.data.inherits_class('image', d):
- pstage_allowed = False
-
- # Add task dependencies if we're active, otherwise mark packaged staging
- # as inactive.
- if pstage_allowed:
- deps = bb.data.getVarFlag('do_setscene', 'depends', d) or ""
- deps += " stagemanager-native:do_populate_staging"
- bb.data.setVarFlag('do_setscene', 'depends', deps, d)
-
- policy = bb.data.getVar("BB_STAMP_POLICY", d, True)
- if policy == "whitelist" or policy == "full":
- deps = bb.data.getVarFlag('do_setscene', 'recrdeptask', d) or ""
- deps += " do_setscene"
- bb.data.setVarFlag('do_setscene', 'recrdeptask', deps, d)
-
- bb.data.setVar("PSTAGING_ACTIVE", "1", d)
- else:
- bb.data.setVar("PSTAGING_ACTIVE", "0", d)
-}
-
-DEPLOY_DIR_PSTAGE ?= "${DEPLOY_DIR}/pstage"
-PSTAGE_MACHCONFIG = "${DEPLOY_DIR_PSTAGE}/opkg.conf"
-
-PSTAGE_PKGMANAGER = "stage-manager-ipkg"
-
-PSTAGE_BUILD_CMD = "stage-manager-ipkg-build -o 0 -g 0"
-PSTAGE_INSTALL_CMD = "${PSTAGE_PKGMANAGER} -f ${PSTAGE_MACHCONFIG} -force-depends -o ${TMPDIR} install"
-PSTAGE_UPDATE_CMD = "${PSTAGE_PKGMANAGER} -f ${PSTAGE_MACHCONFIG} -o ${TMPDIR} update"
-PSTAGE_REMOVE_CMD = "${PSTAGE_PKGMANAGER} -f ${PSTAGE_MACHCONFIG} -force-depends -o ${TMPDIR} remove"
-PSTAGE_LIST_CMD = "${PSTAGE_PKGMANAGER} -f ${PSTAGE_MACHCONFIG} -o ${TMPDIR} list_installed"
-
-PSTAGE_TMPDIR_STAGE = "${WORKDIR}/staging-pkg"
-
-def pstage_manualclean(srcname, destvarname, d):
- import os, bb
-
- src = os.path.join(bb.data.getVar('PSTAGE_TMPDIR_STAGE', d, True), srcname)
- dest = bb.data.getVar(destvarname, d, True)
-
- for walkroot, dirs, files in os.walk(src):
- for file in files:
- # Avoid breaking the held lock
- if (file == "staging.lock"):
- continue
- filepath = os.path.join(walkroot, file).replace(src, dest)
- bb.note("rm %s" % filepath)
- os.system("rm %s" % filepath)
-
-def pstage_set_pkgmanager(d):
- import bb
- path = bb.data.getVar("PATH", d, 1)
- pkgmanager = bb.which(path, 'opkg-cl')
- if pkgmanager == "":
- pkgmanager = bb.which(path, 'ipkg-cl')
- if pkgmanager != "":
- bb.data.setVar("PSTAGE_PKGMANAGER", pkgmanager, d)
-
-
-def pstage_cleanpackage(pkgname, d):
- import os, bb
-
- path = bb.data.getVar("PATH", d, 1)
- pstage_set_pkgmanager(d)
- list_cmd = bb.data.getVar("PSTAGE_LIST_CMD", d, True)
-
- bb.note("Checking if staging package installed")
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
- ret = os.system("PATH=\"%s\" %s | grep %s" % (path, list_cmd, pkgname))
- if ret == 0:
- bb.note("Yes. Uninstalling package from staging...")
- removecmd = bb.data.getVar("PSTAGE_REMOVE_CMD", d, 1)
- ret = os.system("PATH=\"%s\" %s %s" % (path, removecmd, pkgname))
- if ret != 0:
- bb.note("Failure removing staging package")
- else:
- bb.note("No. Manually removing any installed files")
- pstage_manualclean("staging", "STAGING_DIR", d)
- pstage_manualclean("cross", "CROSS_DIR", d)
- pstage_manualclean("deploy", "DEPLOY_DIR", d)
-
- bb.utils.unlockfile(lf)
-
-do_clean_prepend() {
- """
- Clear the build and temp directories
- """
-
- removepkg = bb.data.expand("${PSTAGE_PKGPN}", d)
- pstage_cleanpackage(removepkg, d)
-
- stagepkg = bb.data.expand("${PSTAGE_PKG}", d)
- bb.note("Removing staging package %s" % stagepkg)
- os.system('rm -rf ' + stagepkg)
-}
-
-staging_helper () {
- # Assemble appropriate opkg.conf
- conffile=${PSTAGE_MACHCONFIG}
- mkdir -p ${DEPLOY_DIR_PSTAGE}/pstaging_lists
- if [ ! -e $conffile ]; then
- ipkgarchs="${BUILD_SYS}"
- priority=1
- for arch in $ipkgarchs; do
- echo "arch $arch $priority" >> $conffile
- priority=$(expr $priority + 5)
- done
- echo "dest root /" >> $conffile
- fi
- if [ ! -e ${TMPDIR}${layout_libdir}/opkg/info/ ]; then
- mkdir -p ${TMPDIR}${layout_libdir}/opkg/info/
- fi
- if [ ! -e ${TMPDIR}${layout_libdir}/ipkg/ ]; then
- cd ${TMPDIR}${layout_libdir}/
- ln -sf opkg/ ipkg || true
- fi
-}
-
-PSTAGE_TASKS_COVERED = "fetch unpack munge patch configure qa_configure rig_locales compile sizecheck install deploy package populate_staging package_write_deb package_write_ipk package_write package_stage qa_staging"
-
-SCENEFUNCS += "packagestage_scenefunc"
-
-python packagestage_scenefunc () {
- import os
-
- if bb.data.getVar("PSTAGING_ACTIVE", d, 1) == "0":
- return
-
- bb.build.exec_func("staging_helper", d)
-
- removepkg = bb.data.expand("${PSTAGE_PKGPN}", d)
- pstage_cleanpackage(removepkg, d)
-
- stagepkg = bb.data.expand("${PSTAGE_PKG}", d)
-
- if os.path.exists(stagepkg):
- path = bb.data.getVar("PATH", d, 1)
- pstage_set_pkgmanager(d)
- file = bb.data.getVar("FILE", d, True)
- bb.debug(2, "Packaged staging active for %s\n" % file)
-
- #
- # Install the staging package somewhere temporarily so we can extract the stamp files
- #
- bb.mkdirhier(bb.data.expand("${WORKDIR}/tstage/${layout_libdir}/opkg/info/ ", d))
- cmd = bb.data.expand("${PSTAGE_PKGMANAGER} -f ${PSTAGE_MACHCONFIG} -force-depends -o ${WORKDIR}/tstage install", d)
- ret = os.system("PATH=\"%s\" %s %s" % (path, cmd, stagepkg))
- if ret != 0:
- bb.fatal("Couldn't install the staging package to a temp directory")
-
- #
- # Copy the stamp files into the main stamps directoy
- #
- cmd = bb.data.expand("cp -dpR ${WORKDIR}/tstage/stamps/* ${TMPDIR}/stamps/", d)
- ret = os.system(cmd)
- if ret != 0:
- bb.fatal("Couldn't copy the staging package stamp files")
-
- #
- # Iterate over the stamps seeing if they're valid. If we find any that
- # are invalid or the task wasn't in the taskgraph, assume caution and
- # do a rebuild.
- #
- # FIXME - some tasks are safe to ignore in the task graph. e.g. package_write_*
- stageok = True
- taskscovered = bb.data.getVar("PSTAGE_TASKS_COVERED", d, True).split()
- stamp = bb.data.getVar("STAMP", d, True)
- for task in taskscovered:
- task = 'do_' + task
- stampfn = "%s.%s" % (stamp, task)
- bb.debug(1, "Checking %s" % (stampfn))
- if os.path.exists(stampfn):
- stageok = bb.runqueue.check_stamp_fn(file, task, d)
- bb.debug(1, "Result %s" % (stageok))
- if not stageok:
- break
-
- # Remove the stamps and files we added above
- # FIXME - we should really only remove the stamps we added
- os.system('rm -f ' + stamp + '.*')
- os.system(bb.data.expand("rm -rf ${WORKDIR}/tstage", d))
-
- if stageok:
- bb.note("Staging package found, using it for %s." % file)
- installcmd = bb.data.getVar("PSTAGE_INSTALL_CMD", d, 1)
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
- ret = os.system("PATH=\"%s\" %s %s" % (path, installcmd, stagepkg))
- bb.utils.unlockfile(lf)
- if ret != 0:
- bb.note("Failure installing prestage package")
-
- bb.build.make_stamp("do_stage_package_populated", d)
- else:
- bb.note("Staging package found but invalid for %s" % file)
-
-}
-packagestage_scenefunc[cleandirs] = "${PSTAGE_TMPDIR_STAGE}"
-packagestage_scenefunc[dirs] = "${STAGING_DIR}"
-
-addhandler packagedstage_stampfixing_eventhandler
-python packagedstage_stampfixing_eventhandler() {
- from bb.event import getName
- import os
-
- if getName(e) == "StampUpdate":
- taskscovered = bb.data.getVar("PSTAGE_TASKS_COVERED", e.data, 1).split()
- for (fn, task) in e.targets:
- # strip off 'do_'
- task = task[3:]
- if task in taskscovered:
- stamp = "%s.do_stage_package_populated" % e.stampPrefix[fn]
- if os.path.exists(stamp):
- # We're targetting a task which was skipped with packaged staging
- # so we need to remove the autogenerated stamps.
- for task in taskscovered:
- dir = "%s.do_%s" % (e.stampPrefix[fn], task)
- os.system('rm -f ' + dir)
- os.system('rm -f ' + stamp)
-
- return NotHandled
-}
-
-populate_staging_preamble () {
- if [ "$PSTAGING_ACTIVE" = "1" ]; then
- stage-manager -p ${STAGING_DIR} -c ${DEPLOY_DIR_PSTAGE}/stamp-cache-staging -u || true
- stage-manager -p ${CROSS_DIR} -c ${DEPLOY_DIR_PSTAGE}/stamp-cache-cross -u || true
- fi
-}
-
-populate_staging_postamble () {
- if [ "$PSTAGING_ACTIVE" = "1" ]; then
- # list the packages currently installed in staging
- # ${PSTAGE_LIST_CMD} | awk '{print $1}' > ${DEPLOY_DIR_PSTAGE}/installed-list
-
- # exitcode == 5 is ok, it means the files change
- set +e
- stage-manager -p ${STAGING_DIR} -c ${DEPLOY_DIR_PSTAGE}/stamp-cache-staging -u -d ${PSTAGE_TMPDIR_STAGE}/staging
- exitcode=$?
- if [ "$exitcode" != "5" -a "$exitcode" != "0" ]; then
- exit $exitcode
- fi
- stage-manager -p ${CROSS_DIR} -c ${DEPLOY_DIR_PSTAGE}/stamp-cache-cross -u -d ${PSTAGE_TMPDIR_STAGE}/cross/${BASE_PACKAGE_ARCH}
- if [ "$exitcode" != "5" -a "$exitcode" != "0" ]; then
- exit $exitcode
- fi
- set -e
- fi
-}
-
-do_populate_staging[lockfiles] = "${STAGING_DIR}/staging.lock"
-do_populate_staging[dirs] =+ "${DEPLOY_DIR_PSTAGE}"
-python do_populate_staging_prepend() {
- bb.build.exec_func("populate_staging_preamble", d)
-}
-
-python do_populate_staging_append() {
- bb.build.exec_func("populate_staging_postamble", d)
-}
-
-
-staging_packager () {
-
- mkdir -p ${PSTAGE_TMPDIR_STAGE}/CONTROL
- mkdir -p ${DEPLOY_DIR_PSTAGE}/${PSTAGE_PKGPATH}
-
- echo "Package: ${PSTAGE_PKGPN}" > ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Version: ${PSTAGE_PKGVERSION}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Description: ${DESCRIPTION}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Section: ${SECTION}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Priority: Optional" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Maintainer: ${MAINTAINER}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- echo "Architecture: ${PSTAGE_PKGARCH}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
-
- # Protect against empty SRC_URI
- if [ "${SRC_URI}" != "" ] ; then
- echo "Source: ${SRC_URI}" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- else
- echo "Source: OpenEmbedded" >> ${PSTAGE_TMPDIR_STAGE}/CONTROL/control
- fi
-
- ${PSTAGE_BUILD_CMD} ${PSTAGE_TMPDIR_STAGE} ${DEPLOY_DIR_PSTAGE}/${PSTAGE_PKGPATH}
-}
-
-staging_package_installer () {
- #${PSTAGE_INSTALL_CMD} ${PSTAGE_PKG}
-
- STATUSFILE=${TMPDIR}${layout_libdir}/opkg/status
- echo "Package: ${PSTAGE_PKGPN}" >> $STATUSFILE
- echo "Version: ${PSTAGE_PKGVERSION}" >> $STATUSFILE
- echo "Status: install user installed" >> $STATUSFILE
- echo "Architecture: ${PSTAGE_PKGARCH}" >> $STATUSFILE
- echo "" >> $STATUSFILE
-
- CTRLFILE=${TMPDIR}${layout_libdir}/opkg/info/${PSTAGE_PKGPN}.control
- echo "Package: ${PSTAGE_PKGPN}" > $CTRLFILE
- echo "Version: ${PSTAGE_PKGVERSION}" >> $CTRLFILE
- echo "Architecture: ${PSTAGE_PKGARCH}" >> $CTRLFILE
-
- cd ${PSTAGE_TMPDIR_STAGE}
- find -type f | grep -v ./CONTROL | sed -e 's/^\.//' > ${TMPDIR}${layout_libdir}/opkg/info/${PSTAGE_PKGPN}.list
-}
-
-python do_package_stage () {
- if bb.data.getVar("PSTAGING_ACTIVE", d, 1) != "1":
- return
-
- #
- # Handle deploy/ packages
- #
- bb.build.exec_func("read_subpackage_metadata", d)
- stagepath = bb.data.getVar("PSTAGE_TMPDIR_STAGE", d, 1)
- tmpdir = bb.data.getVar("TMPDIR", d, True)
- packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
- if len(packages) > 0:
- if bb.data.inherits_class('package_ipk', d):
- ipkpath = bb.data.getVar('DEPLOY_DIR_IPK', d, True).replace(tmpdir, stagepath)
- if bb.data.inherits_class('package_deb', d):
- debpath = bb.data.getVar('DEPLOY_DIR_DEB', d, True).replace(tmpdir, stagepath)
- if bb.data.inherits_class('package_rpm', d):
- rpmpath = bb.data.getVar('DEPLOY_DIR_RPM', d, True).replace(tmpdir, stagepath)
-
- for pkg in packages:
- pkgname = bb.data.getVar('PKG_%s' % pkg, d, 1)
- if not pkgname:
- pkgname = pkg
- arch = bb.data.getVar('PACKAGE_ARCH_%s' % pkg, d, 1)
- if not arch:
- arch = bb.data.getVar('PACKAGE_ARCH', d, 1)
- pr = bb.data.getVar('PR_%s' % pkg, d, 1)
- if not pr:
- pr = bb.data.getVar('PR', d, 1)
- if not packaged(pkg, d):
- continue
- if bb.data.inherits_class('package_ipk', d):
- srcname = bb.data.expand(pkgname + "_${PV}-" + pr + "_" + arch + ".ipk", d)
- srcfile = bb.data.expand("${DEPLOY_DIR_IPK}/" + arch + "/" + srcname, d)
- if os.path.exists(srcfile):
- destpath = ipkpath + "/" + arch + "/"
- bb.mkdirhier(destpath)
- bb.copyfile(srcfile, destpath + srcname)
-
- if bb.data.inherits_class('package_deb', d):
- if arch == 'all':
- srcname = bb.data.expand(pkgname + "_${PV}-" + pr + "_all.deb", d)
- else:
- srcname = bb.data.expand(pkgname + "_${PV}-" + pr + "_${DPKG_ARCH}.deb", d)
- srcfile = bb.data.expand("${DEPLOY_DIR_DEB}/" + arch + "/" + srcname, d)
- if os.path.exists(srcfile):
- destpath = debpath + "/" + arch + "/"
- bb.mkdirhier(destpath)
- bb.copyfile(srcfile, destpath + srcname)
-
- if bb.data.inherits_class('package_rpm', d):
- version = bb.data.getVar('PV', d, 1)
- version = version.replace('-', '+')
- bb.data.setVar('RPMPV', version, d)
- srcname = bb.data.expand(pkgname + "-${RPMPV}-" + pr + ".${TARGET_ARCH}.rpm", d)
- srcfile = bb.data.expand("${DEPLOY_DIR_RPM}/" + arch + "/" + srcname, d)
- if os.path.exists(srcfile):
- destpath = rpmpath + "/" + arch + "/"
- bb.mkdirhier(destpath)
- bb.copyfile(srcfile, destpath + srcname)
-
-
- #
- # Handle stamps/ files
- #
- stampfn = bb.data.getVar("STAMP", d, True)
- destdir = os.path.dirname(stampfn.replace(tmpdir, stagepath))
- bb.mkdirhier(destdir)
- # We need to include the package_stage stamp in the staging package so create one
- bb.build.make_stamp("do_package_stage", d)
- os.system("cp -dpR %s.do_* %s/" % (stampfn, destdir))
-
- pstage_set_pkgmanager(d)
- bb.build.exec_func("staging_helper", d)
- bb.build.exec_func("staging_packager", d)
- lf = bb.utils.lockfile(bb.data.expand("${STAGING_DIR}/staging.lock", d))
- bb.build.exec_func("staging_package_installer", d)
- bb.utils.unlockfile(lf)
-}
-
-#
-# Note an assumption here is that do_deploy runs before do_package_write/do_populate_staging
-#
-addtask package_stage after do_package_write do_populate_staging before do_build
-
-do_package_stage_all () {
- :
-}
-do_package_stage_all[recrdeptask] = "do_package_stage"
-addtask package_stage_all after do_package_stage before do_build
diff --git a/meta/classes/packagedata.bbclass b/meta/classes/packagedata.bbclass
index c9d64d6da..60f1aded0 100644
--- a/meta/classes/packagedata.bbclass
+++ b/meta/classes/packagedata.bbclass
@@ -1,82 +1,13 @@
-def packaged(pkg, d):
- import os, bb
- return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
-
-def read_pkgdatafile(fn):
- pkgdata = {}
-
- def decode(str):
- import codecs
- c = codecs.getdecoder("string_escape")
- return c(str)[0]
-
- import os
- if os.access(fn, os.R_OK):
- import re
- f = file(fn, 'r')
- lines = f.readlines()
- f.close()
- r = re.compile("([^:]+):\s*(.*)")
- for l in lines:
- m = r.match(l)
- if m:
- pkgdata[m.group(1)] = decode(m.group(2))
-
- return pkgdata
-
-def get_subpkgedata_fn(pkg, d):
- import bb, os
- archs = bb.data.expand("${PACKAGE_ARCHS}", d).split(" ")
- archs.reverse()
- pkgdata = bb.data.expand('${TMPDIR}/pkgdata/', d)
- targetdir = bb.data.expand('${TARGET_VENDOR}-${TARGET_OS}/runtime/', d)
- for arch in archs:
- fn = pkgdata + arch + targetdir + pkg
- if os.path.exists(fn):
- return fn
- return bb.data.expand('${PKGDATA_DIR}/runtime/%s' % pkg, d)
-
-def has_subpkgdata(pkg, d):
- import bb, os
- return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
-
-def read_subpkgdata(pkg, d):
- import bb
- return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
-
-def has_pkgdata(pn, d):
- import bb, os
- fn = bb.data.expand('${PKGDATA_DIR}/%s' % pn, d)
- return os.access(fn, os.R_OK)
-
-def read_pkgdata(pn, d):
- import bb
- fn = bb.data.expand('${PKGDATA_DIR}/%s' % pn, d)
- return read_pkgdatafile(fn)
-
python read_subpackage_metadata () {
- import bb
- data = read_pkgdata(bb.data.getVar('PN', d, 1), d)
+ import oe.packagedata
+
+ data = oe.packagedata.read_pkgdata(d.getVar('PN', True), d)
for key in data.keys():
- bb.data.setVar(key, data[key], d)
+ d.setVar(key, data[key])
- for pkg in bb.data.getVar('PACKAGES', d, 1).split():
- sdata = read_subpkgdata(pkg, d)
+ for pkg in d.getVar('PACKAGES', True).split():
+ sdata = oe.packagedata.read_subpkgdata(pkg, d)
for key in sdata.keys():
- bb.data.setVar(key, sdata[key], d)
+ d.setVar(key, sdata[key])
}
-
-
-#
-# Collapse FOO_pkg variables into FOO
-#
-def read_subpkgdata_dict(pkg, d):
- import bb
- ret = {}
- subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
- for var in subd:
- newvar = var.replace("_" + pkg, "")
- ret[newvar] = subd[var]
- return ret
-
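The helpers deleted above (read_pkgdatafile, get_subpkgedata_fn and friends) now live in the oe.packagedata Python module, as the new import shows. As a rough standalone sketch of the file format they handle — plain "KEY: value" lines with backslash-escaped values — assuming Python 3's unicode_escape codec in place of the old Python 2 string_escape:

    import codecs
    import re

    def read_pkgdatafile(fn):
        # Parse a pkgdata file of "KEY: value" lines into a dict,
        # unescaping values as the removed helper did.
        pkgdata = {}
        pattern = re.compile(r"([^:]+):\s*(.*)")
        with open(fn) as f:
            for line in f:
                m = pattern.match(line)
                if m:
                    pkgdata[m.group(1)] = codecs.decode(m.group(2), "unicode_escape")
        return pkgdata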
diff --git a/meta/classes/packageinfo.bbclass b/meta/classes/packageinfo.bbclass
new file mode 100644
index 000000000..26cce604a
--- /dev/null
+++ b/meta/classes/packageinfo.bbclass
@@ -0,0 +1,46 @@
+python packageinfo_handler () {
+ if isinstance(e, bb.event.RequestPackageInfo):
+ import oe.packagedata
+ pkginfolist = []
+ tmpdir = e.data.getVar('TMPDIR', True)
+ target_vendor = e.data.getVar('TARGET_VENDOR', True)
+ target_os = e.data.getVar('TARGET_OS', True)
+ package_archs = e.data.getVar('PACKAGE_ARCHS', True)
+ packaging = e.data.getVar('PACKAGE_CLASSES', True).split()[0].split('_')[1]
+ deploy_dir = e.data.getVar('DEPLOY_DIR', True) + '/' + packaging
+ for arch in package_archs.split():
+ pkgdata_dir = tmpdir + '/pkgdata/' + arch + target_vendor + '-' + target_os + '/runtime/'
+ if os.path.exists(pkgdata_dir):
+ for root, dirs, files in os.walk(pkgdata_dir):
+ for pkgname in files:
+ if pkgname.endswith('.packaged'):
+ continue
+ sdata = oe.packagedata.read_pkgdatafile(root + pkgname)
+ sdata['PKG'] = pkgname
+ pkgrename = sdata['PKG_%s' % pkgname]
+ pkgv = sdata['PKGV'].replace('-', '+')
+ pkgr = sdata['PKGR']
+ # We found there are renaming issues with certain architectures.
+ # For example, armv7a-vfp-neon uses armv7a in the rpm file name; the checks below work around that.
+ arch_tmp = arch.split('-')[0]
+ if os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgname + '-' + pkgv + '-' + pkgr + '.' + arch + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgname + '-' + pkgv + '-' + pkgr + '.' + arch_tmp + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgrename + '-' + pkgv + '-' + pkgr + '.' + arch + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgrename + '-' + pkgv + '-' + pkgr + '.' + arch_tmp + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgname + '_' + pkgv + '-' + pkgr + '_' + arch + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgname + '_' + pkgv + '-' + pkgr + '_' + arch_tmp + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgrename + '_' + pkgv + '-' + pkgr + '_' + arch + '.' + packaging) or \
+ os.path.exists(deploy_dir + '/' + arch + '/' + \
+ pkgrename + '_' + pkgv + '-' + pkgr + '_' + arch_tmp + '.' + packaging):
+ pkginfolist.append(sdata)
+ bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
+}
+
+addhandler packageinfo_handler
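The existence test above spells out eight os.path.exists() probes inline. A hypothetical helper showing the same check more compactly (the name and signature are illustrative, not part of the class):

    import itertools
    import os

    def deployed_pkg_exists(deploy_dir, arch, names, pkgv, pkgr, packaging):
        # Try rpm-style (name-ver-rel.arch.ext) and ipk/deb-style
        # (name_ver-rel_arch.ext) filenames for each candidate name,
        # with both the full and the truncated architecture.
        arch_tmp = arch.split('-')[0]  # e.g. armv7a-vfp-neon -> armv7a
        for name, a in itertools.product(names, (arch, arch_tmp)):
            for fname in ("%s-%s-%s.%s.%s" % (name, pkgv, pkgr, a, packaging),
                          "%s_%s-%s_%s.%s" % (name, pkgv, pkgr, a, packaging)):
                if os.path.exists(os.path.join(deploy_dir, arch, fname)):
                    return True
        return False

Called as deployed_pkg_exists(deploy_dir, arch, (pkgname, pkgrename), pkgv, pkgr, packaging), it covers the same eight candidates as the chained condition.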
diff --git a/meta/classes/patch.bbclass b/meta/classes/patch.bbclass
index 8bb2dee79..3c4d99783 100644
--- a/meta/classes/patch.bbclass
+++ b/meta/classes/patch.bbclass
@@ -1,505 +1,176 @@
# Copyright (C) 2006 OpenedHand LTD
# Point to an empty file so any user's custom settings don't break things
-QUILTRCFILE ?= "${STAGING_BINDIR_NATIVE}/quiltrc"
+QUILTRCFILE ?= "${STAGING_ETCDIR_NATIVE}/quiltrc"
-def patch_init(d):
- import os, sys
+PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
- def md5sum(fname):
- import md5, sys
+inherit terminal
- f = file(fname, 'rb')
- m = md5.new()
- while True:
- d = f.read(8096)
- if not d:
- break
- m.update(d)
- f.close()
- return m.hexdigest()
-
- class CmdError(Exception):
- def __init__(self, exitstatus, output):
- self.status = exitstatus
- self.output = output
-
- def __str__(self):
- return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
-
- class NotFoundError(Exception):
- def __init__(self, path):
- self.path = path
- def __str__(self):
- return "Error: %s not found." % self.path
-
- def runcmd(args, dir = None):
- import commands
-
- if dir:
- olddir = os.path.abspath(os.curdir)
- if not os.path.exists(dir):
- raise NotFoundError(dir)
- os.chdir(dir)
- # print("cwd: %s -> %s" % (olddir, self.dir))
-
- try:
- args = [ commands.mkarg(str(arg)) for arg in args ]
- cmd = " ".join(args)
- # print("cmd: %s" % cmd)
- (exitstatus, output) = commands.getstatusoutput(cmd)
- if exitstatus != 0:
- raise CmdError(exitstatus >> 8, output)
- return output
-
- finally:
- if dir:
- os.chdir(olddir)
-
- class PatchError(Exception):
- def __init__(self, msg):
- self.msg = msg
-
- def __str__(self):
- return "Patch Error: %s" % self.msg
-
- import bb, bb.data, bb.fetch
-
- class PatchSet(object):
- defaults = {
- "strippath": 1
- }
-
- def __init__(self, dir, d):
- self.dir = dir
- self.d = d
- self.patches = []
- self._current = None
-
- def current(self):
- return self._current
-
- def Clean(self):
- """
- Clean out the patch set. Generally includes unapplying all
- patches and wiping out all associated metadata.
- """
- raise NotImplementedError()
-
- def Import(self, patch, force):
- if not patch.get("file"):
- if not patch.get("remote"):
- raise PatchError("Patch file must be specified in patch import.")
- else:
- patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
-
- for param in PatchSet.defaults:
- if not patch.get(param):
- patch[param] = PatchSet.defaults[param]
-
- if patch.get("remote"):
- patch["file"] = bb.data.expand(bb.fetch.localpath(patch["remote"], self.d), self.d)
-
- patch["filemd5"] = md5sum(patch["file"])
-
- def Push(self, force):
- raise NotImplementedError()
-
- def Pop(self, force):
- raise NotImplementedError()
-
- def Refresh(self, remote = None, all = None):
- raise NotImplementedError()
-
-
- class PatchTree(PatchSet):
- def __init__(self, dir, d):
- PatchSet.__init__(self, dir, d)
-
- def Import(self, patch, force = None):
- """"""
- PatchSet.Import(self, patch, force)
-
- if self._current is not None:
- i = self._current + 1
- else:
- i = 0
- self.patches.insert(i, patch)
-
- def _applypatch(self, patch, force = None, reverse = None):
- shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
- if reverse:
- shellcmd.append('-R')
-
- if not force:
- shellcmd.append('--dry-run')
-
- output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
-
- if force:
- return
-
- shellcmd.pop(len(shellcmd) - 1)
- output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
- return output
-
- def Push(self, force = None, all = None):
+def src_patches(d, all = False ):
+ workdir = d.getVar('WORKDIR', True)
+ fetch = bb.fetch2.Fetch([], d)
+ patches = []
+ sources = []
+ for url in fetch.urls:
+ local = patch_path(url, fetch, workdir)
+ if not local:
if all:
- for i in self.patches:
- if self._current is not None:
- self._current = self._current + 1
- else:
- self._current = 0
- bb.note("applying patch %s" % i)
- self._applypatch(i, force)
- else:
- if self._current is not None:
- self._current = self._current + 1
- else:
- self._current = 0
- bb.note("applying patch %s" % self.patches[self._current])
- self._applypatch(self.patches[self._current], force)
-
-
- def Pop(self, force = None, all = None):
- if all:
- for i in self.patches:
- self._applypatch(i, force, True)
- else:
- self._applypatch(self.patches[self._current], force, True)
-
- def Clean(self):
- """"""
-
- class QuiltTree(PatchSet):
- def _runcmd(self, args):
- quiltrc = bb.data.getVar('QUILTRCFILE', self.d, 1)
- runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
-
- def _quiltpatchpath(self, file):
- return os.path.join(self.dir, "patches", os.path.basename(file))
-
-
- def __init__(self, dir, d):
- PatchSet.__init__(self, dir, d)
- self.initialized = False
- p = os.path.join(self.dir, 'patches')
- if not os.path.exists(p):
- os.makedirs(p)
-
- def Clean(self):
- try:
- self._runcmd(["pop", "-a", "-f"])
- except Exception:
- pass
- self.initialized = True
-
- def InitFromDir(self):
- # read series -> self.patches
- seriespath = os.path.join(self.dir, 'patches', 'series')
- if not os.path.exists(self.dir):
- raise Exception("Error: %s does not exist." % self.dir)
- if os.path.exists(seriespath):
- series = file(seriespath, 'r')
- for line in series.readlines():
- patch = {}
- parts = line.strip().split()
- patch["quiltfile"] = self._quiltpatchpath(parts[0])
- patch["quiltfilemd5"] = md5sum(patch["quiltfile"])
- if len(parts) > 1:
- patch["strippath"] = parts[1][2:]
- self.patches.append(patch)
- series.close()
-
- # determine which patches are applied -> self._current
- try:
- output = runcmd(["quilt", "applied"], self.dir)
- except CmdError:
- if sys.exc_value.output.strip() == "No patches applied":
- return
- else:
- raise sys.exc_value
- output = [val for val in output.split('\n') if not val.startswith('#')]
- for patch in self.patches:
- if os.path.basename(patch["quiltfile"]) == output[-1]:
- self._current = self.patches.index(patch)
- self.initialized = True
-
- def Import(self, patch, force = None):
- if not self.initialized:
- self.InitFromDir()
- PatchSet.Import(self, patch, force)
-
- args = ["import", "-p", patch["strippath"]]
- if force:
- args.append("-f")
- args.append("-dn")
- args.append(patch["file"])
-
- self._runcmd(args)
-
- patch["quiltfile"] = self._quiltpatchpath(patch["file"])
- patch["quiltfilemd5"] = md5sum(patch["quiltfile"])
-
- # TODO: determine if the file being imported:
- # 1) is already imported, and is the same
- # 2) is already imported, but differs
-
- self.patches.insert(self._current or 0, patch)
-
-
- def Push(self, force = None, all = None):
- # quilt push [-f]
-
- args = ["push"]
- if force:
- args.append("-f")
- if all:
- args.append("-a")
+ local = fetch.localpath(url)
+ sources.append(local)
+ continue
- self._runcmd(args)
+ urldata = fetch.ud[url]
+ parm = urldata.parm
+ patchname = parm.get('pname') or os.path.basename(local)
- if self._current is not None:
- self._current = self._current + 1
- else:
- self._current = 0
+ apply, reason = should_apply(parm, d)
+ if not apply:
+ if reason:
+ bb.note("Patch %s %s" % (patchname, reason))
+ continue
- def Pop(self, force = None, all = None):
- # quilt pop [-f]
- args = ["pop"]
- if force:
- args.append("-f")
- if all:
- args.append("-a")
+ patchparm = {'patchname': patchname}
+ if "striplevel" in parm:
+ striplevel = parm["striplevel"]
+ elif "pnum" in parm:
+ #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+ striplevel = parm["pnum"]
+ else:
+ striplevel = '1'
+ patchparm['striplevel'] = striplevel
- self._runcmd(args)
+ patchdir = parm.get('patchdir')
+ if patchdir:
+ patchparm['patchdir'] = patchdir
- if self._current == 0:
- self._current = None
+ localurl = bb.encodeurl(('file', '', local, '', '', patchparm))
+ patches.append(localurl)
- if self._current is not None:
- self._current = self._current - 1
+ if all:
+ return sources
- def Refresh(self, **kwargs):
- if kwargs.get("remote"):
- patch = self.patches[kwargs["patch"]]
- if not patch:
- raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
- (type, host, path, user, pswd, parm) = bb.decodeurl(patch["remote"])
- if type == "file":
- import shutil
- if not patch.get("file") and patch.get("remote"):
- patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
+ return patches
- shutil.copyfile(patch["quiltfile"], patch["file"])
- else:
- raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
- else:
- # quilt refresh
- args = ["refresh"]
- if kwargs.get("quiltfile"):
- args.append(os.path.basename(kwargs["quiltfile"]))
- elif kwargs.get("patch"):
- args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
- self._runcmd(args)
+def patch_path(url, fetch, workdir):
+ """Return the local path of a patch, or None if this isn't a patch"""
- class Resolver(object):
- def __init__(self, patchset):
- raise NotImplementedError()
+ local = fetch.localpath(url)
+ base, ext = os.path.splitext(os.path.basename(local))
+ if ext in ('.gz', '.bz2', '.Z'):
+ local = os.path.join(workdir, base)
+ ext = os.path.splitext(base)[1]
- def Resolve(self):
- raise NotImplementedError()
+ urldata = fetch.ud[url]
+ if "apply" in urldata.parm:
+ apply = oe.types.boolean(urldata.parm["apply"])
+ if not apply:
+ return
+ elif ext not in (".diff", ".patch"):
+ return
- def Revert(self):
- raise NotImplementedError()
+ return local
- def Finalize(self):
- raise NotImplementedError()
+def should_apply(parm, d):
+ """Determine if we should apply the given patch"""
- class NOOPResolver(Resolver):
- def __init__(self, patchset):
- self.patchset = patchset
+ if "mindate" in parm or "maxdate" in parm:
+ pn = d.getVar('PN', True)
+ srcdate = d.getVar('SRCDATE_%s' % pn, True)
+ if not srcdate:
+ srcdate = d.getVar('SRCDATE', True)
- def Resolve(self):
- olddir = os.path.abspath(os.curdir)
- os.chdir(self.patchset.dir)
- try:
- self.patchset.Push()
- except Exception:
- os.chdir(olddir)
- raise sys.exc_value
+ if srcdate == "now":
+ srcdate = d.getVar('DATE', True)
- # Patch resolver which relies on the user doing all the work involved in the
- # resolution, with the exception of refreshing the remote copy of the patch
- # files (the urls).
- class UserResolver(Resolver):
- def __init__(self, patchset):
- self.patchset = patchset
+ if "maxdate" in parm and parm["maxdate"] < srcdate:
+ return False, 'is outdated'
- # Force a push in the patchset, then drop to a shell for the user to
- # resolve any rejected hunks
- def Resolve(self):
+ if "mindate" in parm and parm["mindate"] > srcdate:
+ return False, 'is predated'
- olddir = os.path.abspath(os.curdir)
- os.chdir(self.patchset.dir)
- try:
- self.patchset.Push(True)
- except CmdError, v:
- # Patch application failed
- if sys.exc_value.output.strip() == "No patches applied":
- return
- print(sys.exc_value)
- print('NOTE: dropping user into a shell, so that patch rejects can be fixed manually.')
- print('Press CTRL+D to exit.')
- os.system('/bin/sh')
+ if "minrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and srcrev < parm["minrev"]:
+ return False, 'applies to later revisions'
- # Construct a new PatchSet after the user's changes, compare the
- # sets, checking patches for modifications, and doing a remote
- # refresh on each.
- oldpatchset = self.patchset
- self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
- self.patchset.InitFromDir()
+ if "maxrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and srcrev > parm["maxrev"]:
+ return False, 'applies to earlier revisions'
- for patch in self.patchset.patches:
- oldpatch = None
- for opatch in oldpatchset.patches:
- if opatch["quiltfile"] == patch["quiltfile"]:
- oldpatch = opatch
+ if "rev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and parm["rev"] not in srcrev:
+ return False, "doesn't apply to revision"
- if oldpatch:
- patch["remote"] = oldpatch["remote"]
- if patch["quiltfile"] == oldpatch["quiltfile"]:
- if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
- bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
- # user change? remote refresh
- self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
- else:
- # User did not fix the problem. Abort.
- raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
- except Exception:
- os.chdir(olddir)
- raise
- os.chdir(olddir)
+ if "notrev" in parm:
+ srcrev = d.getVar('SRCREV', True)
+ if srcrev and parm["notrev"] in srcrev:
+ return False, "doesn't apply to revision"
- g = globals()
- g["PatchSet"] = PatchSet
- g["PatchTree"] = PatchTree
- g["QuiltTree"] = QuiltTree
- g["Resolver"] = Resolver
- g["UserResolver"] = UserResolver
- g["NOOPResolver"] = NOOPResolver
- g["NotFoundError"] = NotFoundError
- g["CmdError"] = CmdError
- g["PatchError"] = PatchError
+ return True, None
-addtask patch after do_unpack
-do_patch[dirs] = "${WORKDIR}"
-PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_staging"
-do_patch[depends] = "${PATCHDEPENDENCY}"
+should_apply[vardepsexclude] = "DATE SRCDATE"
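The parm dictionary that should_apply() inspects carries the parameters from the patch's SRC_URI entry. A minimal runnable illustration, with a made-up patch and dates:

    # Corresponds to a hypothetical entry like:
    #   SRC_URI += "file://fix.patch;maxdate=20101231;striplevel=2"
    parm = {"maxdate": "20101231", "striplevel": "2"}
    srcdate = "20110415"  # stand-in for d.getVar('SRCDATE', True)
    if "maxdate" in parm and parm["maxdate"] < srcdate:
        print("Patch fix.patch is outdated")  # mirrors the bb.note() above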
python patch_do_patch() {
- import re
- import bb.fetch
-
- patch_init(d)
-
- src_uri = (bb.data.getVar('SRC_URI', d, 1) or '').split()
- if not src_uri:
- return
+ import oe.patch
patchsetmap = {
- "patch": PatchTree,
- "quilt": QuiltTree,
+ "patch": oe.patch.PatchTree,
+ "quilt": oe.patch.QuiltTree,
+ "git": oe.patch.GitApplyTree,
}
- cls = patchsetmap[bb.data.getVar('PATCHTOOL', d, 1) or 'quilt']
+ cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
resolvermap = {
- "noop": NOOPResolver,
- "user": UserResolver,
+ "noop": oe.patch.NOOPResolver,
+ "user": oe.patch.UserResolver,
}
- rcls = resolvermap[bb.data.getVar('PATCHRESOLVE', d, 1) or 'noop']
-
- s = bb.data.getVar('S', d, 1)
-
- path = os.getenv('PATH')
- os.putenv('PATH', bb.data.getVar('PATH', d, 1))
- patchset = cls(s, d)
- patchset.Clean()
+ rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
- resolver = rcls(patchset)
+ classes = {}
- workdir = bb.data.getVar('WORKDIR', d, 1)
- for url in src_uri:
- (type, host, path, user, pswd, parm) = bb.decodeurl(url)
- if not "patch" in parm:
- continue
+ s = d.getVar('S', True)
- bb.fetch.init([url],d)
- url = bb.encodeurl((type, host, path, user, pswd, []))
- local = os.path.join('/', bb.fetch.localpath(url, d))
+ path = os.getenv('PATH')
+ os.putenv('PATH', d.getVar('PATH', True))
- # did it need to be unpacked?
- dots = os.path.basename(local).split(".")
- if dots[-1] in ['gz', 'bz2', 'Z']:
- unpacked = os.path.join(bb.data.getVar('WORKDIR', d),'.'.join(dots[0:-1]))
- else:
- unpacked = local
- unpacked = bb.data.expand(unpacked, d)
+ for patch in src_patches(d):
+ _, _, local, _, _, parm = bb.decodeurl(patch)
- if "pnum" in parm:
- pnum = parm["pnum"]
+ if "patchdir" in parm:
+ patchdir = parm["patchdir"]
+ if not os.path.isabs(patchdir):
+ patchdir = os.path.join(s, patchdir)
else:
- pnum = "1"
+ patchdir = s
- if "pname" in parm:
- pname = parm["pname"]
+ if not patchdir in classes:
+ patchset = cls(patchdir, d)
+ resolver = rcls(patchset, oe_terminal)
+ classes[patchdir] = (patchset, resolver)
+ patchset.Clean()
else:
- pname = os.path.basename(unpacked)
-
- if "mindate" in parm or "maxdate" in parm:
- pn = bb.data.getVar('PN', d, 1)
- srcdate = bb.data.getVar('SRCDATE_%s' % pn, d, 1)
- if not srcdate:
- srcdate = bb.data.getVar('SRCDATE', d, 1)
+ patchset, resolver = classes[patchdir]
- if srcdate == "now":
- srcdate = bb.data.getVar('DATE', d, 1)
-
- if "maxdate" in parm and parm["maxdate"] < srcdate:
- bb.note("Patch '%s' is outdated" % pname)
- continue
-
- if "mindate" in parm and parm["mindate"] > srcdate:
- bb.note("Patch '%s' is predated" % pname)
- continue
-
-
- if "minrev" in parm:
- srcrev = bb.data.getVar('SRCREV', d, 1)
- if srcrev and srcrev < parm["minrev"]:
- bb.note("Patch '%s' applies to later revisions" % pname)
- continue
-
- if "maxrev" in parm:
- srcrev = bb.data.getVar('SRCREV', d, 1)
- if srcrev and srcrev > parm["maxrev"]:
- bb.note("Patch '%s' applies to earlier revisions" % pname)
- continue
-
- bb.note("Applying patch '%s'" % pname)
+ bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
try:
- patchset.Import({"file":unpacked, "remote":url, "strippath": pnum}, True)
- except NotFoundError:
- import sys
- raise bb.build.FuncFailed(str(sys.exc_value))
+ patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
+ except Exception as exc:
+ bb.fatal(str(exc))
try:
resolver.Resolve()
- except PatchError:
- import sys
- raise bb.build.FuncFailed(str(sys.exc_value))
+ except bb.BBHandledException as e:
+ bb.fatal(str(e))
}
+patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
+
+addtask patch after do_unpack
+do_patch[dirs] = "${WORKDIR}"
+do_patch[depends] = "${PATCHDEPENDENCY}"
EXPORT_FUNCTIONS do_patch
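The rewritten patch_do_patch() keys its (patchset, resolver) pairs on patchdir so each distinct directory is cleaned exactly once. A minimal sketch of that caching pattern in isolation (the factory arguments are hypothetical stand-ins for the cls/rcls classes chosen above):

    def get_patchset(patchdir, classes, make_patchset, make_resolver):
        # One (patchset, resolver) pair per distinct patch directory;
        # Clean() runs only the first time a directory is seen.
        if patchdir not in classes:
            patchset = make_patchset(patchdir)
            classes[patchdir] = (patchset, make_resolver(patchset))
            patchset.Clean()
        return classes[patchdir]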
diff --git a/meta/classes/patcher.bbclass b/meta/classes/patcher.bbclass
deleted file mode 100644
index c8a1b0350..000000000
--- a/meta/classes/patcher.bbclass
+++ /dev/null
@@ -1,7 +0,0 @@
-# Now that BitBake/OpenEmbedded uses Quilt by default, you can simply add an
-# inherit patcher
-# to one of your config files to let BB/OE use patcher again.
-
-PATCHCLEANCMD = "patcher -B"
-PATCHCMD = "patcher -R -p '%s' -n '%s' -i '%s'"
-PATCH_DEPENDS = "${@["patcher-native", ""][(bb.data.getVar('PN', d, 1) == 'patcher-native')]}"
diff --git a/meta/classes/perlnative.bbclass b/meta/classes/perlnative.bbclass
new file mode 100644
index 000000000..2211b7213
--- /dev/null
+++ b/meta/classes/perlnative.bbclass
@@ -0,0 +1,3 @@
+PATH_prepend = "${STAGING_BINDIR_NATIVE}/perl-native:"
+DEPENDS += "perl-native"
+OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"
diff --git a/meta/classes/pkg_distribute.bbclass b/meta/classes/pkg_distribute.bbclass
index 81978e3e3..9f249a0df 100644
--- a/meta/classes/pkg_distribute.bbclass
+++ b/meta/classes/pkg_distribute.bbclass
@@ -1,6 +1,6 @@
PKG_DISTRIBUTECOMMAND[func] = "1"
python do_distribute_packages () {
- cmd = bb.data.getVar('PKG_DISTRIBUTECOMMAND', d, 1)
+ cmd = d.getVar('PKG_DISTRIBUTECOMMAND', True)
if not cmd:
raise bb.build.FuncFailed("Unable to distribute packages, PKG_DISTRIBUTECOMMAND not defined")
bb.build.exec_func('PKG_DISTRIBUTECOMMAND', d)
diff --git a/meta/classes/pkg_metainfo.bbclass b/meta/classes/pkg_metainfo.bbclass
index ac4f73c77..4b182690f 100644
--- a/meta/classes/pkg_metainfo.bbclass
+++ b/meta/classes/pkg_metainfo.bbclass
@@ -1,5 +1,5 @@
python do_pkg_write_metainfo () {
- deploydir = bb.data.getVar('DEPLOY_DIR', d, 1)
+ deploydir = d.getVar('DEPLOY_DIR', True)
if not deploydir:
bb.error("DEPLOY_DIR not defined, unable to write package info")
return
@@ -9,11 +9,11 @@ python do_pkg_write_metainfo () {
except OSError:
raise bb.build.FuncFailed("unable to open package-info file for writing.")
- name = bb.data.getVar('PN', d, 1)
- version = bb.data.getVar('PV', d, 1)
- desc = bb.data.getVar('DESCRIPTION', d, 1)
- page = bb.data.getVar('HOMEPAGE', d, 1)
- lic = bb.data.getVar('LICENSE', d, 1)
+ name = d.getVar('PN', True)
+ version = d.getVar('PV', True)
+ desc = d.getVar('DESCRIPTION', True)
+ page = d.getVar('HOMEPAGE', True)
+ lic = d.getVar('LICENSE', True)
infofile.write("|| "+ name +" || "+ version + " || "+ desc +" || "+ page +" || "+ lic + " ||\n" )
infofile.close()
diff --git a/meta/classes/pkgconfig.bbclass b/meta/classes/pkgconfig.bbclass
index d65f8a625..ddcf87822 100644
--- a/meta/classes/pkgconfig.bbclass
+++ b/meta/classes/pkgconfig.bbclass
@@ -1,11 +1,7 @@
-inherit base
-
DEPENDS_prepend = "pkgconfig-native "
-do_stage_append () {
- install -d ${PKG_CONFIG_DIR}
- for pc in `find ${S} -name '*.pc' -type f | grep -v -- '-uninstalled.pc$'`; do
- pcname=`basename $pc`
- cat $pc > ${PKG_CONFIG_DIR}/$pcname
- done
-}
+PKGCONFIGRDEP = "pkgconfig"
+PKGCONFIGRDEP_virtclass-native = ""
+PKGCONFIGRDEP_virtclass-nativesdk = "pkgconfig-nativesdk"
+
+RDEPENDS_${PN}-dev += "${PKGCONFIGRDEP}"
diff --git a/meta/classes/poky-autobuild-notifier.bbclass b/meta/classes/poky-autobuild-notifier.bbclass
deleted file mode 100644
index 5b0a77cb7..000000000
--- a/meta/classes/poky-autobuild-notifier.bbclass
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-# Copyright Openedhand Ltd 2008
-# Author: Richard Purdie
-#
-
-# Designed for use with the Poky autobuilder only and provides custom hooks for
-# certain specific events.
-
-def do_autobuilder_failure_report(event):
- from bb.event import getName
- from bb import data, mkdirhier, build
- import os, glob
-
- if data.getVar('PN', event.data, True) != "clutter":
- return
-
- import smtplib
- import email.Message
-
- version = data.expand("${PN}: ${PV}-${PR}", event.data)
-
- recipients = ["richard@o-hand.com", "ebassi@o-hand.com", "pippin@o-hand.com"]
- COMMASPACE = ', '
-
- message = email.Message.Message()
- message["To"] = COMMASPACE.join(recipients)
- message["From"] = "Poky Autobuilder Failure <poky@o-hand.com>"
- message["Subject"] = "Poky Autobuild Failure Report - " + version
-
- mesg = "Poky Build Failure for:\n\n"
-
- for var in ["DISTRO", "MACHINE", "PN", "PV", "PR"]:
- mesg += var + ": " + data.getVar(var, event.data, True) + "\n"
-
- mesg += "\nLog of the failure follows:\n\n"
-
- log_file = glob.glob("%s/log.%s.*" % (data.getVar('T', event.data, True), event.task))
- if len(log_file) != 0:
- mesg += "".join(open(log_file[0], 'r').readlines())
-
- message.set_payload(mesg)
-
- mailServer = smtplib.SMTP("pug.o-hand.com")
- mailServer.sendmail(message["From"], recipients, message.as_string())
- mailServer.quit()
-
-# we want to be an event handler
-addhandler poky_autobuilder_notifier_eventhandler
-python poky_autobuilder_notifier_eventhandler() {
- from bb import note, error, data
- from bb.event import NotHandled, getName
-
- if e.data is None:
- return NotHandled
-
- name = getName(e)
-
- if name == "TaskFailed":
- do_autobuilder_failure_report(e)
-
- return NotHandled
-}
diff --git a/meta/classes/poky-image.bbclass b/meta/classes/poky-image.bbclass
deleted file mode 100644
index 8d0efae55..000000000
--- a/meta/classes/poky-image.bbclass
+++ /dev/null
@@ -1,97 +0,0 @@
-# Common for Poky images
-#
-# Copyright (C) 2007 OpenedHand LTD
-
-# IMAGE_FEATURES control content of images built with Poky.
-#
-# By default we install task-poky-boot and task-base packages - this gives us
-# working (console only) rootfs.
-#
-# Available IMAGE_FEATURES:
-#
-# - apps-console-core
-# - x11-base - X11 server + minimal desktop
-# - x11-sato - OpenedHand Sato environment
-# - x11-netbook - Metacity based environment for netbooks
-# - apps-x11-core - X Terminal, file manager, file editor
-# - apps-x11-games
-# - apps-x11-pimlico - OpenedHand Pimlico apps
-# - tools-sdk - SDK
-# - tools-debug - debugging tools
-# - tools-profile - profiling tools
-# - tools-testapps - tools usable to make some device tests
-# - nfs-server - NFS server (exports / over NFS to everybody)
-# - dev-pkgs - development packages
-# - dbg-pkgs - debug packages
-#
-
-POKY_BASE_INSTALL = '\
- task-poky-boot \
- task-base-extended \
- ${@base_contains("IMAGE_FEATURES", "dbg-pkgs", "task-poky-boot-dbg task-base-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", "dev-pkgs", "task-poky-boot-dev task-base-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "apps-console-core", "task-poky-apps-console", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-console-core", "dbg-pkgs"], "task-poky-apps-console-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-console-core", "dev-pkgs"], "task-poky-apps-console-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "x11-base", "task-poky-x11-base", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-base", "dbg-pkgs"], "task-poky-x11-base-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-base", "dev-pkgs"], "task-poky-x11-base-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "x11-sato", "task-poky-x11-sato", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-sato", "dbg-pkgs"], "task-poky-x11-sato-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-sato", "dev-pkgs"], "task-poky-x11-sato-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "x11-netbook", "task-poky-x11-netbook", "", d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-netbook", "dbg-pkgs"], "task-poky-x11-netbook-dbg", "", d)} \
- ${@base_contains("IMAGE_FEATURES", ["x11-netbook", "dev-pkgs"], "task-poky-x11-netbook-dev", "", d)} \
- ${@base_contains("IMAGE_FEATURES", "apps-x11-core", "task-poky-apps-x11-core", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-core", "dbg-pkgs"], "task-poky-apps-x11-core-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-core", "dev-pkgs"], "task-poky-apps-x11-core-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "apps-x11-games", "task-poky-apps-x11-games", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-games", "dbg-pkgs"], "task-poky-apps-x11-games-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-games", "dev-pkgs"], "task-poky-apps-x11-games-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "apps-x11-pimlico", "task-poky-apps-x11-pimlico", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-pimlico", "dbg-pkgs"], "task-poky-apps-x11-pimlico-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["apps-x11-pimlico", "dev-pkgs"], "task-poky-apps-x11-pimlico-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "tools-debug", "task-poky-tools-debug", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-debug", "dbg-pkgs"], "task-poky-tools-debug-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-debug", "dev-pkgs"], "task-poky-tools-debug-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "tools-profile", "task-poky-tools-profile", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-profile", "dbg-pkgs"], "task-poky-tools-profile-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-profile", "dev-pkgs"], "task-poky-tools-profile-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "tools-testapps", "task-poky-tools-testapps", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-testapps", "dbg-pkgs"], "task-poky-tools-testapps-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-testapps", "dev-pkgs"], "task-poky-tools-testapps-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "tools-sdk", "task-poky-sdk task-poky-standalone-sdk-target", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-sdk", "dbg-pkgs"], "task-poky-sdk-dbg", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["tools-sdk", "dev-pkgs"], "task-poky-sdk-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "nfs-server", "task-poky-nfs-server", "",d)} \
- ${@base_contains("IMAGE_FEATURES", ["nfs-server", "dbg-pkgs"], "task-poky-nfs-server-dbg", "",d)} \
-
- ${@base_contains("IMAGE_FEATURES", ["nfs-server", "dev-pkgs"], "task-poky-nfs-server-dev", "",d)} \
- \
- ${@base_contains("IMAGE_FEATURES", "package-management", "${ROOTFS_PKGMANAGE}", "",d)} \
- ${POKY_EXTRA_INSTALL} \
- '
-
-POKY_EXTRA_INSTALL ?= ""
-
-IMAGE_INSTALL ?= "${POKY_BASE_INSTALL}"
-
-X11_IMAGE_FEATURES = "x11-base apps-x11-core package-management"
-ENHANCED_IMAGE_FEATURES = "${X11_IMAGE_FEATURES} apps-x11-games apps-x11-pimlico package-management"
-SATO_IMAGE_FEATURES = "${ENHANCED_IMAGE_FEATURES} x11-sato"
-
-inherit image
-
-# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
diff --git a/meta/classes/poky.bbclass b/meta/classes/poky.bbclass
deleted file mode 100644
index ba81f9e22..000000000
--- a/meta/classes/poky.bbclass
+++ /dev/null
@@ -1,5 +0,0 @@
-MIRRORS_append () {
-ftp://.*/.* http://pokylinux.org/sources/
-http://.*/.* http://pokylinux.org/sources/
-https://.*/.* http://pokylinux.org/sources/
-}
diff --git a/meta/classes/populate_sdk.bbclass b/meta/classes/populate_sdk.bbclass
new file mode 100644
index 000000000..5aa8e92b8
--- /dev/null
+++ b/meta/classes/populate_sdk.bbclass
@@ -0,0 +1,89 @@
+inherit meta toolchain-scripts
+inherit populate_sdk_${IMAGE_PKGTYPE}
+
+SDK_DIR = "${WORKDIR}/sdk"
+SDK_OUTPUT = "${SDK_DIR}/image"
+SDK_DEPLOY = "${TMPDIR}/deploy/sdk"
+
+SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${MULTIMACH_TARGET_SYS}"
+
+TOOLCHAIN_HOST_TASK ?= "task-sdk-host-nativesdk task-cross-canadian-${TRANSLATED_TARGET_ARCH}"
+TOOLCHAIN_TARGET_TASK ?= "task-core-standalone-sdk-target task-core-standalone-sdk-target-dbg"
+TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${DISTRO_VERSION}"
+
+RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
+DEPENDS = "virtual/fakeroot-native sed-native"
+
+PID = "${@os.getpid()}"
+
+EXCLUDE_FROM_WORLD = "1"
+
+python () {
+ # If we don't do this, we try to run the mapping hooks while parsing, which is slow;
+ # bitbake should really provide something to let us know this...
+ if bb.data.getVar('BB_WORKERCONTEXT', d, True) is not None:
+ runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", d)
+}
+
+fakeroot do_populate_sdk() {
+ rm -rf ${SDK_OUTPUT}
+ mkdir -p ${SDK_OUTPUT}
+
+ # populate_sdk_<image> is required to construct two images:
+ # SDK_ARCH-nativesdk - contains the cross compiler and associated tooling
+ # target - contains a target rootfs configured for the SDK usage
+ #
+ # the output of populate_sdk_<image> should end up in ${SDK_OUTPUT}; it is made
+ # up of:
+ # ${SDK_OUTPUT}/<sdk_arch-nativesdk pkgs>
+ # ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/<target pkgs>
+
+ populate_sdk_${IMAGE_PKGTYPE}
+
+ # Don't ship any libGL in the SDK
+ rm -rf ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/libGL*
+
+ # Can copy pstage files here
+ # target_pkgs=`cat ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/var/lib/opkg/status | grep Package: | cut -f 2 -d ' '`
+
+ # Fix or remove broken .la files
+ #rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}/lib/*.la
+ rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/*.la
+
+ # Link the ld.so.cache file into the host's filesystem
+ ln -s /etc/ld.so.cache ${SDK_OUTPUT}/${SDKPATHNATIVE}/etc/ld.so.cache
+
+ # Setup site file for external use
+ toolchain_create_sdk_siteconfig ${SDK_OUTPUT}/${SDKPATH}/site-config-${MULTIMACH_TARGET_SYS}
+
+ toolchain_create_sdk_env_script
+
+ # Add version information
+ toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${MULTIMACH_TARGET_SYS}
+
+ # Package it up
+ mkdir -p ${SDK_DEPLOY}
+ cd ${SDK_OUTPUT}
+ tar --owner=root --group=root -cj --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
+}
+
+populate_sdk_log_check() {
+ for target in $*
+ do
+ lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
+
+ echo "log_check: Using $lf_path as logfile"
+
+ if test -e "$lf_path"
+ then
+ ${IMAGE_PKGTYPE}_log_check $target $lf_path
+ else
+ echo "Cannot find logfile [$lf_path]"
+ fi
+ echo "Logfile is clean"
+ done
+}
+
+do_populate_sdk[nostamp] = "1"
+do_populate_sdk[recrdeptask] = "do_package_write"
+addtask populate_sdk before do_build after do_install
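The packaging step above writes the SDK to ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2. A small sketch reconstructing that path, with assumed example values for TMPDIR, SDK_NAME and DISTRO_VERSION:

    import os

    sdk_deploy = "tmp/deploy/sdk"  # ${SDK_DEPLOY} with TMPDIR = "tmp"
    outputname = "%s-toolchain-%s" % ("poky-glibc-i686-arm", "1.1")
    print(os.path.join(sdk_deploy, outputname + ".tar.bz2"))
    # -> tmp/deploy/sdk/poky-glibc-i686-arm-toolchain-1.1.tar.bz2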
diff --git a/meta/classes/populate_sdk_deb.bbclass b/meta/classes/populate_sdk_deb.bbclass
new file mode 100644
index 000000000..920c89a0f
--- /dev/null
+++ b/meta/classes/populate_sdk_deb.bbclass
@@ -0,0 +1,61 @@
+do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_deb"
+
+
+DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', True), "i386"]\
+ [d.getVar('SDK_ARCH', True) in \
+ ["x86", "i486", "i586", "i686", "pentium"]]}"
+
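DEB_SDK_ARCH above uses the inline-Python [false_value, true_value][condition] idiom: indexing a two-element list with a boolean. A runnable sketch of the same expression:

    sdk_arch = "i686"  # stand-in for d.getVar('SDK_ARCH', True)
    deb_sdk_arch = [sdk_arch, "i386"][sdk_arch in
                    ["x86", "i486", "i586", "i686", "pentium"]]
    print(deb_sdk_arch)  # -> i386: any ix86 variant maps to Debian's i386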
+populate_sdk_post_deb () {
+
+ local target_rootfs=$1
+
+ tar -cf - -C ${STAGING_ETCDIR_NATIVE} -ps apt | tar -xf - -C ${target_rootfs}/etc
+}
+
+populate_sdk_deb () {
+
+ # update index
+ package_update_index_deb
+
+ ## install target ##
+ # This needs to work in the same way as rootfs_deb.bbclass
+ echo "Installing TARGET packages"
+
+ mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
+
+ export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
+ export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
+ export INSTALL_PACKAGES_DEB="${TOOLCHAIN_TARGET_TASK}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
+ export PACKAGES_LINGUAS_DEB=""
+ export INSTALL_TASK_DEB="populate_sdk-target"
+
+ package_install_internal_deb
+
+ populate_sdk_post_deb ${INSTALL_ROOTFS_DEB}
+
+ populate_sdk_log_check populate_sdk
+
+ ## install nativesdk ##
+ echo "Installing NATIVESDK packages"
+ export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}"
+ export INSTALL_BASEARCH_DEB="${DEB_SDK_ARCH}"
+ export INSTALL_ARCHS_DEB="${SDK_PACKAGE_ARCHS}"
+ export INSTALL_PACKAGES_DEB="${TOOLCHAIN_HOST_TASK}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
+ export PACKAGES_LINGUAS_DEB=""
+ export INSTALL_TASK_DEB="populate_sdk-nativesdk"
+
+ package_install_internal_deb
+ populate_sdk_post_deb ${SDK_OUTPUT}/${SDKPATHNATIVE}
+
+ # Move remaining dpkg state files
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
+ mv ${SDK_OUTPUT}/var/lib/dpkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
+ rm -rf ${SDK_OUTPUT}/var
+
+ populate_sdk_log_check populate_sdk
+}
+
diff --git a/meta/classes/populate_sdk_ipk.bbclass b/meta/classes/populate_sdk_ipk.bbclass
new file mode 100644
index 000000000..c256c69d4
--- /dev/null
+++ b/meta/classes/populate_sdk_ipk.bbclass
@@ -0,0 +1,49 @@
+do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_ipk"
+
+populate_sdk_ipk() {
+
+ rm -f ${IPKGCONF_TARGET}
+ touch ${IPKGCONF_TARGET}
+ rm -f ${IPKGCONF_SDK}
+ touch ${IPKGCONF_SDK}
+
+ package_update_index_ipk
+ package_generate_ipkg_conf
+
+ export INSTALL_PACKAGES_ATTEMPTONLY_IPK=""
+ export INSTALL_PACKAGES_LINGUAS_IPK=""
+ export INSTALL_TASK_IPK="populate_sdk"
+
+ #install target
+ export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
+ export INSTALL_PACKAGES_IPK="${TOOLCHAIN_TARGET_TASK}"
+
+ export D=${INSTALL_ROOTFS_IPK}
+ export OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
+ export IPKG_OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
+ export OPKG_OFFLINE_ROOT=${IPKG_OFFLINE_ROOT}
+
+ package_install_internal_ipk
+
+ #install host
+ export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}"
+ export INSTALL_CONF_IPK="${IPKGCONF_SDK}"
+ export INSTALL_PACKAGES_IPK="${TOOLCHAIN_HOST_TASK}"
+
+ package_install_internal_ipk
+
+ #post clean up
+ install -d ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}
+ install -m 0644 ${IPKGCONF_TARGET} ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}/
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
+ install -m 0644 ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg
+ mv ${SDK_OUTPUT}/var/lib/opkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg/
+ rm -Rf ${SDK_OUTPUT}/var
+
+ populate_sdk_log_check populate_sdk
+}
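The four exports above point opkg's offline mode at the staged rootfs rather than /. A sketch of the same setup from Python (the rootfs path is a made-up example):

    import os

    install_rootfs = "/build/tmp/work/sdk/image/sysroot"  # hypothetical path
    for var in ("D", "OFFLINE_ROOT", "IPKG_OFFLINE_ROOT", "OPKG_OFFLINE_ROOT"):
        os.environ[var] = install_rootfs  # package scripts run against this root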
diff --git a/meta/classes/populate_sdk_rpm.bbclass b/meta/classes/populate_sdk_rpm.bbclass
new file mode 100644
index 000000000..e0adb8f89
--- /dev/null
+++ b/meta/classes/populate_sdk_rpm.bbclass
@@ -0,0 +1,132 @@
+do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
+do_populate_sdk[recrdeptask] += "do_package_write_rpm"
+
+rpmlibdir = "/var/lib/rpm"
+RPMOPTS="--dbpath ${rpmlibdir} --define='_openall_before_chroot 1'"
+RPM="rpm ${RPMOPTS}"
+
+do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
+
+populate_sdk_post_rpm () {
+
+ local target_rootfs=$1
+
+ # remove lock files
+ rm -f ${target_rootfs}/__db.*
+
+ # Move manifests into the directory with the logs
+ mv ${target_rootfs}/install/*.manifest ${T}/
+
+ # Remove all remaining resolver files
+ rm -rf ${target_rootfs}/install
+}
+
+populate_sdk_rpm () {
+
+ package_update_index_rpm
+ package_generate_rpm_conf
+
+ ## install target ##
+ # This needs to work in the same way as rootfs_rpm.bbclass!
+ #
+ export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
+ export INSTALL_PLATFORM_RPM="${TARGET_ARCH}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_TARGET_BASE}"
+ export INSTALL_PACKAGES_RPM="${TOOLCHAIN_TARGET_TASK}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_RPM=""
+ export INSTALL_PACKAGES_LINGUAS_RPM=""
+ export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig pkgconfig(pkg-config)"
+ export INSTALL_TASK_RPM="populate_sdk-target"
+
+ # Setup base system configuration
+ mkdir -p ${INSTALL_ROOTFS_RPM}/etc/rpm/
+ mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}
+ mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}/log
+ cat > ${INSTALL_ROOTFS_RPM}${rpmlibdir}/DB_CONFIG << EOF
+# ================ Environment
+set_data_dir .
+set_create_dir .
+set_lg_dir ./log
+set_tmp_dir ./tmp
+
+# -- thread_count must be >= 8
+set_thread_count 64
+
+# ================ Logging
+
+# ================ Memory Pool
+set_mp_mmapsize 268435456
+
+# ================ Locking
+set_lk_max_locks 16384
+set_lk_max_lockers 16384
+set_lk_max_objects 16384
+mutex_set_max 163840
+
+# ================ Replication
+EOF
+
+ # List must be in most-preferred to least-preferred order
+ INSTALL_PLATFORM_EXTRA_RPM=""
+ for each_arch in ${MULTILIB_PACKAGE_ARCHS} ${PACKAGE_ARCHS} ; do
+ INSTALL_PLATFORM_EXTRA_RPM="$each_arch $INSTALL_PLATFORM_EXTRA_RPM"
+ done
+ export INSTALL_PLATFORM_EXTRA_RPM
+
+ package_install_internal_rpm
+ populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
+
+ ## install nativesdk ##
+ echo "Installing NATIVESDK packages"
+ export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}"
+ export INSTALL_PLATFORM_RPM="${SDK_ARCH}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_HOST_BASE}"
+ export INSTALL_PACKAGES_RPM="${TOOLCHAIN_HOST_TASK}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_RPM=""
+ export INSTALL_PACKAGES_LINGUAS_RPM=""
+ export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig libGL.so()(64bit) libGL.so"
+ export INSTALL_TASK_RPM="populate_sdk_rpm-nativesdk"
+
+ # List must be in most-preferred to least-preferred order
+ INSTALL_PLATFORM_EXTRA_RPM=""
+ for each_arch in ${SDK_PACKAGE_ARCHS} ; do
+ INSTALL_PLATFORM_EXTRA_RPM="$each_arch $INSTALL_PLATFORM_EXTRA_RPM"
+ done
+ export INSTALL_PLATFORM_EXTRA_RPM
+
+ package_install_internal_rpm
+ populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
+
+ # move host RPM library data
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm
+ mv ${SDK_OUTPUT}${rpmlibdir}/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm/
+ rm -Rf ${SDK_OUTPUT}/var
+
+ install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
+ mv ${SDK_OUTPUT}/etc/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
+ rm -rf ${SDK_OUTPUT}/etc
+
+ populate_sdk_log_check populate_sdk
+
+ # Workaround so the parser knows we need the resolve_package function!
+ if false ; then
+ resolve_package_rpm foo ${RPMCONF_TARGET_BASE}.conf || true
+ fi
+}
+
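Both loops above prepend each architecture, reversing PACKAGE_ARCHS (least-preferred first in OE) into most-preferred-first order for the RPM resolver. The same reversal in a short Python sketch, with an assumed arch list:

    package_archs = "all any noarch arm armv4 armv5te qemuarm".split()
    install_platform_extra = []
    for arch in package_archs:
        install_platform_extra.insert(0, arch)  # prepend, i.e. reverse
    print(" ".join(install_platform_extra))
    # -> qemuarm armv5te armv4 arm noarch any all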
+python () {
+ ml_package_archs = ""
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + eext[1]
+ localdata.setVar("OVERRIDES", overrides)
+ # TEMP: OVERRIDES isn't working right
+ localdata.setVar("DEFAULTTUNE", localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + eext[1], False) or "")
+ ml_package_archs += localdata.getVar("PACKAGE_ARCHS", True) or ""
+ #bb.note("ML_PACKAGE_ARCHS %s %s %s" % (eext[1], localdata.getVar("PACKAGE_ARCHS", True) or "(none)", overrides))
+ d.setVar('MULTILIB_PACKAGE_ARCHS', ml_package_archs)
+}
+
diff --git a/meta/classes/prexport.bbclass b/meta/classes/prexport.bbclass
new file mode 100644
index 000000000..2b16a6694
--- /dev/null
+++ b/meta/classes/prexport.bbclass
@@ -0,0 +1,47 @@
+PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
+PRSERV_DUMPOPT_PKGARCH = ""
+PRSERV_DUMPOPT_CHECKSUM = ""
+PRSERV_DUMPOPT_COL = "0"
+
+PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
+PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
+
+python prexport_handler () {
+ import bb.event
+ if not e.data:
+ return
+
+ if isinstance(e, bb.event.RecipeParsed):
+ import oe.prservice
+ #get all PR values for the current PRAUTOINX
+ ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
+ ver = ver.replace('%','-')
+ retval = oe.prservice.prserv_dump_db(e.data)
+ if not retval:
+ bb.fatal("prexport_handler: export failed!")
+ (metainfo, datainfo) = retval
+ if not datainfo:
+ bb.warn("prexport_handler: No AUROPR values found for %s" % ver)
+ return
+ oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
+ elif isinstance(e, bb.event.ParseStarted):
+ import bb.utils
+ import oe.prservice
+ oe.prservice.prserv_check_avail(e.data)
+ #remove dumpfile
+ bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
+ elif isinstance(e, bb.event.ParseCompleted):
+ import oe.prservice
+ #dump meta info of tables
+ d = e.data.createCopy()
+ d.setVar('PRSERV_DUMPOPT_COL', "1")
+ retval = oe.prservice.prserv_dump_db(d)
+ if not retval:
+ bb.error("prexport_handler: export failed!")
+ return
+ (metainfo, datainfo) = retval
+ oe.prservice.prserv_export_tofile(d, metainfo, None, True)
+
+}
+
+addhandler prexport_handler
diff --git a/meta/classes/primport.bbclass b/meta/classes/primport.bbclass
new file mode 100644
index 000000000..37b0d6b1b
--- /dev/null
+++ b/meta/classes/primport.bbclass
@@ -0,0 +1,20 @@
+python primport_handler () {
+ import bb.event
+ if not e.data:
+ return
+
+ if isinstance(e, bb.event.ParseCompleted):
+ import oe.prservice
+ #import all exported AUTOPR values
+ imported = oe.prservice.prserv_import_db(e.data)
+ if imported is None:
+ bb.fatal("import failed!")
+
+ for (version, pkgarch, checksum, value) in imported:
+ bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
+ elif isinstance(e, bb.event.ParseStarted):
+ import oe.prservice
+ oe.prservice.prserv_check_avail(e.data)
+}
+
+addhandler primport_handler
diff --git a/meta/classes/prserv.bbclass b/meta/classes/prserv.bbclass
new file mode 100644
index 000000000..0825306f9
--- /dev/null
+++ b/meta/classes/prserv.bbclass
@@ -0,0 +1,21 @@
+def prserv_get_pr_auto(d):
+ import oe.prservice
+ if d.getVar('USE_PR_SERV', True) != "1":
+ bb.warn("Not using network based PR service")
+ return None
+
+ version = d.getVar("PRAUTOINX", True)
+ pkgarch = d.getVar("PACKAGE_ARCH", True)
+ checksum = d.getVar("BB_TASKHASH", True)
+
+ if d.getVar('PRSERV_LOCKDOWN', True):
+ auto_rev = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
+ else:
+ conn = d.getVar("__PRSERV_CONN", True)
+ if conn is None:
+ conn = oe.prservice.prserv_make_conn(d)
+ if conn is None:
+ return None
+ auto_rev = conn.getPR(version, pkgarch, checksum)
+
+ return auto_rev
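prserv_get_pr_auto() returns either a lockdown value or a revision fetched from the PR server, or None when no service is usable. A hypothetical helper showing how such a value might be folded into a PR string (the naming scheme here is illustrative only, not the class's own):

    def pr_with_auto(base_pr, auto_rev):
        # Append the service-assigned revision to the recipe's base PR.
        if auto_rev is None:
            return base_pr
        return "%s.%d" % (base_pr, auto_rev)

    print(pr_with_auto("r4", 3))     # -> r4.3
    print(pr_with_auto("r4", None))  # -> r4 (no PR service available)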
diff --git a/meta/classes/python-dir.bbclass b/meta/classes/python-dir.bbclass
new file mode 100644
index 000000000..0b6a33c2e
--- /dev/null
+++ b/meta/classes/python-dir.bbclass
@@ -0,0 +1,3 @@
+PYTHON_BASEVERSION ?= "2.7"
+PYTHON_DIR = "python${PYTHON_BASEVERSION}"
+PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"
diff --git a/meta/classes/qemu.bbclass b/meta/classes/qemu.bbclass
new file mode 100644
index 000000000..c0a538036
--- /dev/null
+++ b/meta/classes/qemu.bbclass
@@ -0,0 +1,15 @@
+#
+# This class contains functions for recipes that need QEMU or test for its
+# existence.
+#
+
+def qemu_target_binary(data):
+ import bb
+
+ target_arch = data.getVar("TARGET_ARCH", True)
+ if target_arch in ("i486", "i586", "i686"):
+ target_arch = "i386"
+ elif target_arch == "powerpc":
+ target_arch = "ppc"
+
+ return "qemu-" + target_arch
diff --git a/meta/classes/qmake2.bbclass b/meta/classes/qmake2.bbclass
index 5d221cc5d..5eebd8ec4 100644
--- a/meta/classes/qmake2.bbclass
+++ b/meta/classes/qmake2.bbclass
@@ -3,18 +3,22 @@
#
inherit qmake_base
-DEPENDS_prepend = "qmake2-cross uicmoc4-native "
+DEPENDS_prepend = "qt4-tools-native "
-export QTDIR = "${STAGING_DIR}/${HOST_SYS}/qt4"
-export QMAKESPEC = "${QTDIR}/mkspecs/${TARGET_OS}-oe-g++"
+export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
+export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
+export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
export OE_QMAKE_LINK = "${CXX}"
export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
-export OE_QMAKE_INCDIR_QT = "${QTDIR}/include"
-export OE_QMAKE_LIBDIR_QT = "${QTDIR}/lib"
+export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
+export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
export OE_QMAKE_LIBS_QT = "qt"
export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
+export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
+export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
+export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
diff --git a/meta/classes/qmake_base.bbclass b/meta/classes/qmake_base.bbclass
index ea8903c27..d1008b638 100644
--- a/meta/classes/qmake_base.bbclass
+++ b/meta/classes/qmake_base.bbclass
@@ -1,3 +1,4 @@
+QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
@@ -31,15 +32,29 @@ oe_qmake_mkspecs () {
done
}
+do_generate_qt_config_file() {
+ export QT_CONF_PATH=${WORKDIR}/qt.conf
+ cat > ${WORKDIR}/qt.conf <<EOF
+[Paths]
+Prefix =
+Binaries = ${STAGING_BINDIR_NATIVE}
+Headers = ${STAGING_INCDIR}/qt4
+Plugins = ${STAGING_LIBDIR}/qt4/plugins/
+Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
+EOF
+}
+
+addtask generate_qt_config_file after do_patch before do_configure
+
qmake_base_do_configure() {
case ${QMAKESPEC} in
- *linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++)
+ *linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++)
;;
*-oe-g++)
die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
;;
*)
- oenote Searching for qmake spec file
+ bbnote Searching for qmake spec file
paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
@@ -55,7 +70,7 @@ qmake_base_do_configure() {
;;
esac
- oenote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
+ bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
if [ -z "${QMAKE_PROFILES}" ]; then
PROFILES="`ls *.pro`"
@@ -70,15 +85,20 @@ qmake_base_do_configure() {
if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
AFTER="-after"
QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
- oenote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
+ bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
fi
if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
- oenote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
+ bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
fi
-#oenote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
+ # Hack .pro files to use OE utilities
+ find -name '*.pro' \
+ -exec sed -i -e 's,=\s*.*/lrelease,= ${OE_QMAKE_LRELEASE},g' \
+ -e 's,=\s*.*/lupdate,= ${OE_QMAKE_LUPDATE},g' '{}' ';'
+
+#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
unset QMAKESPEC || true
${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
}
diff --git a/meta/classes/qt4e.bbclass b/meta/classes/qt4e.bbclass
new file mode 100644
index 000000000..05c24efaa
--- /dev/null
+++ b/meta/classes/qt4e.bbclass
@@ -0,0 +1,19 @@
+DEPENDS_prepend = "${@["qt4-embedded ", ""][(d.getVar('PN', True)[:12] == 'qt4-embedded')]}"
+
+inherit qmake2
+
+QT_BASE_NAME = "qt4-embedded"
+QT_DIR_NAME = "qtopia"
+QT_LIBINFIX = "E"
+# override variables set by qmake-base to compile Qt/Embedded apps
+#
+export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
+export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
+export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
+export OE_QMAKE_LIBS_QT = "qt"
+export OE_QMAKE_LIBS_X11 = ""
+export OE_QMAKE_EXTRA_MODULES = "network"
+EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
+
+# Qt4 uses atomic instructions not supported in thumb mode
+ARM_INSTRUCTION_SET = "arm"
diff --git a/meta/classes/qt4x11.bbclass b/meta/classes/qt4x11.bbclass
new file mode 100644
index 000000000..610397d05
--- /dev/null
+++ b/meta/classes/qt4x11.bbclass
@@ -0,0 +1,10 @@
+DEPENDS_prepend = "${@["qt4-x11-free ", ""][(d.getVar('BPN', True)[:12] == 'qt4-x11-free')]}"
+
+inherit qmake2
+
+QT_BASE_NAME = "qt4"
+QT_DIR_NAME = "qt4"
+QT_LIBINFIX = ""
+
+# Qt4 uses atomic instructions not supported in thumb mode
+ARM_INSTRUCTION_SET = "arm"
diff --git a/meta/classes/recipe_sanity.bbclass b/meta/classes/recipe_sanity.bbclass
new file mode 100644
index 000000000..da8ad76c9
--- /dev/null
+++ b/meta/classes/recipe_sanity.bbclass
@@ -0,0 +1,179 @@
+def __note(msg, d):
+ bb.note("%s: recipe_sanity: %s" % (d.getVar("P", True), msg))
+
+__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
+def bad_runtime_vars(cfgdata, d):
+ if bb.data.inherits_class("native", d) or \
+ bb.data.inherits_class("cross", d):
+ return
+
+ for var in d.getVar("__recipe_sanity_badruntimevars", True).split():
+ val = d.getVar(var, 0)
+ if val and val != cfgdata.get(var):
+ __note("%s should be %s_${PN}" % (var, var), d)
+
+__recipe_sanity_reqvars = "DESCRIPTION"
+__recipe_sanity_reqdiffvars = "LICENSE"
+def req_vars(cfgdata, d):
+ for var in d.getVar("__recipe_sanity_reqvars", True).split():
+ if not d.getVar(var, 0):
+ __note("%s should be set" % var, d)
+
+ for var in d.getVar("__recipe_sanity_reqdiffvars", True).split():
+ val = d.getVar(var, 0)
+ cfgval = cfgdata.get(var)
+
+ # Hardcoding is bad, but I'm lazy. We don't care about license being
+ # unset if the recipe has no sources!
+ if var == "LICENSE" and d.getVar("SRC_URI", True) == cfgdata.get("SRC_URI"):
+ continue
+
+ if not val:
+ __note("%s should be set" % var, d)
+ elif val == cfgval:
+ __note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
+
+def var_renames_overwrite(cfgdata, d):
+ renames = d.getVar("__recipe_sanity_renames", 0)
+ if renames:
+ for (key, newkey, oldvalue, newvalue) in renames:
+ if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
+ __note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
+
+def incorrect_nonempty_PACKAGES(cfgdata, d):
+ if bb.data.inherits_class("native", d) or \
+ bb.data.inherits_class("cross", d):
+ if d.getVar("PACKAGES", True):
+ return True
+
+def can_use_autotools_base(cfgdata, d):
+ cfg = d.getVar("do_configure", True)
+ if not bb.data.inherits_class("autotools", d):
+ return False
+
+ for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
+ if cfg.find(i) != -1:
+ return False
+
+ import os
+ for clsfile in d.getVar("__inherit_cache", 0):
+ (base, _) = os.path.splitext(os.path.basename(clsfile))
+ if cfg.find("%s_do_configure" % base) != -1:
+ __note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
+
+ return True
+
+def can_remove_FILESPATH(cfgdata, d):
+ expected = cfgdata.get("FILESPATH")
+ #expected = "${@':'.join([os.path.normpath(os.path.join(fp, p, o)) for fp in d.getVar('FILESPATHBASE', True).split(':') for p in d.getVar('FILESPATHPKG', True).split(':') for o in (d.getVar('OVERRIDES', True) + ':').split(':') if os.path.exists(os.path.join(fp, p, o))])}:${FILESDIR}"
+ expectedpaths = d.expand(expected)
+ unexpanded = d.getVar("FILESPATH", 0)
+ filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
+ for fp in filespath:
+ if not fp in expectedpaths:
+ # __note("Path %s in FILESPATH not in the expected paths %s" %
+ # (fp, expectedpaths), d)
+ return False
+ return expected != unexpanded
+
+def can_remove_FILESDIR(cfgdata, d):
+ expected = cfgdata.get("FILESDIR")
+ #expected = "${@bb.which(d.getVar('FILESPATH', True), '.')}"
+ unexpanded = d.getVar("FILESDIR", 0)
+ if unexpanded is None:
+ return False
+
+ expanded = os.path.normpath(d.getVar("FILESDIR", True))
+ filespath = d.getVar("FILESPATH", True).split(":")
+ filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
+
+ return unexpanded != expected and \
+ os.path.exists(expanded) and \
+ (expanded in filespath or
+ expanded == d.expand(expected))
+
+def can_remove_others(p, cfgdata, d):
+ for k in ["S", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
+ "SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
+ #for k in cfgdata:
+ unexpanded = d.getVar(k, 0)
+ cfgunexpanded = cfgdata.get(k)
+ if not cfgunexpanded:
+ continue
+
+ try:
+ expanded = d.getVar(k, True)
+ cfgexpanded = d.expand(cfgunexpanded)
+ except bb.fetch.ParameterError:
+ continue
+
+ if unexpanded != cfgunexpanded and \
+ cfgexpanded == expanded:
+ __note("candidate for removal of %s" % k, d)
+ bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
+ (p, cfgunexpanded, unexpanded, expanded))
+
+python do_recipe_sanity () {
+ p = d.getVar("P", True)
+ p = "%s %s %s" % (d.getVar("PN", True), d.getVar("PV", True), d.getVar("PR", True))
+
+ sanitychecks = [
+ (can_remove_FILESDIR, "candidate for removal of FILESDIR"),
+ (can_remove_FILESPATH, "candidate for removal of FILESPATH"),
+ #(can_use_autotools_base, "candidate for use of autotools_base"),
+ (incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
+ ]
+ cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
+
+ for (func, msg) in sanitychecks:
+ if func(cfgdata, d):
+ __note(msg, d)
+
+ can_remove_others(p, cfgdata, d)
+ var_renames_overwrite(cfgdata, d)
+ req_vars(cfgdata, d)
+ bad_runtime_vars(cfgdata, d)
+}
+do_recipe_sanity[nostamp] = "1"
+#do_recipe_sanity[recrdeptask] = "do_recipe_sanity"
+addtask recipe_sanity
+
+do_recipe_sanity_all[nostamp] = "1"
+do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity"
+do_recipe_sanity_all () {
+ :
+}
+addtask recipe_sanity_all after do_recipe_sanity
+
+python recipe_sanity_eh () {
+ from bb.event import getName
+
+ if getName(e) != "ConfigParsed":
+ return
+
+ d = e.data
+
+ cfgdata = {}
+ for k in d.keys():
+ #for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
+ # "SECTION"]:
+ cfgdata[k] = d.getVar(k, 0)
+
+ d.setVar("__recipe_sanity_cfgdata", cfgdata)
+ #d.setVar("__recipe_sanity_cfgdata", d)
+
+ # Sick, very sick..
+ from bb.data_smart import DataSmart
+ old = DataSmart.renameVar
+ def myrename(self, key, newkey):
+ oldvalue = self.getVar(newkey, 0)
+ old(self, key, newkey)
+ newvalue = self.getVar(newkey, 0)
+ if oldvalue:
+ renames = self.getVar("__recipe_sanity_renames", 0) or set()
+ renames.add((key, newkey, oldvalue, newvalue))
+ self.setVar("__recipe_sanity_renames", renames)
+ DataSmart.renameVar = myrename
+}
+addhandler recipe_sanity_eh
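The "Sick, very sick.." block wraps DataSmart.renameVar so that every rename which overwrites an existing value is recorded for do_recipe_sanity to report later. The same wrap-and-record pattern in isolation (hypothetical Store class, not bitbake's datastore):

    class Store:
        def __init__(self):
            self.vals = {}
            self.renames = set()
        def rename(self, key, newkey):
            self.vals[newkey] = self.vals.pop(key)

    _orig = Store.rename
    def _tracking_rename(self, key, newkey):
        old = self.vals.get(newkey)   # value about to be clobbered, if any
        _orig(self, key, newkey)
        if old is not None:
            self.renames.add((key, newkey, old, self.vals[newkey]))
    Store.rename = _tracking_rename

    s = Store()
    s.vals = {"A": 1, "B": 2}
    s.rename("A", "B")   # clobbers B's old value
    print(s.renames)     # {('A', 'B', 2, 1)}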
diff --git a/meta/classes/relocatable.bbclass b/meta/classes/relocatable.bbclass
new file mode 100644
index 000000000..072f533f4
--- /dev/null
+++ b/meta/classes/relocatable.bbclass
@@ -0,0 +1,94 @@
+SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
+
+CHRPATH_BIN ?= "chrpath"
+PREPROCESS_RELOCATE_DIRS ?= ""
+
+def process_dir (directory, d):
+ import subprocess as sub
+ import stat
+
+ cmd = d.expand('${CHRPATH_BIN}')
+ tmpdir = d.getVar('TMPDIR')
+ basedir = d.expand('${base_prefix}')
+
+ #bb.debug("Checking %s for binaries to process" % directory)
+ if not os.path.exists(directory):
+ return
+
+ dirs = os.listdir(directory)
+ for file in dirs:
+ fpath = directory + "/" + file
+ fpath = os.path.normpath(fpath)
+ if os.path.islink(fpath):
+ # Skip symlinks
+ continue
+
+ if os.path.isdir(fpath):
+ process_dir(fpath, d)
+ else:
+ #bb.note("Testing %s for relocatability" % fpath)
+
+ # We need read and write permissions for chrpath, if we don't have
+ # them then set them temporarily. Take a copy of the files
+ # permissions so that we can restore them afterwards.
+ perms = os.stat(fpath)[stat.ST_MODE]
+ if os.access(fpath, os.W_OK|os.R_OK):
+ perms = None
+ else:
+ # Temporarily make the file writeable so we can chrpath it
+ os.chmod(fpath, perms|stat.S_IRWXU)
+
+ p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ # If chrpath returned successfully, parse its stdout for the RPATH
+ if p.returncode != 0:
+ continue
+
+ # Throw away everything other than the rpath list
+ curr_rpath = out.partition("RPATH=")[2]
+ #bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
+ rpaths = curr_rpath.split(":")
+ new_rpaths = []
+ for rpath in rpaths:
+ # If rpath is already dynamic continue
+ if rpath.find("$ORIGIN") != -1:
+ continue
+ # If the rpath shares a root with base_prefix determine a new dynamic rpath from the
+ # base_prefix shared root
+ if rpath.find(basedir) != -1:
+ depth = fpath.partition(basedir)[2].count('/')
+ libpath = rpath.partition(basedir)[2].strip()
+ # otherwise (i.e. cross packages) determine a shared root based on the TMPDIR
+ # NOTE: This will not work reliably for cross packages, particularly in the case
+ # where your TMPDIR is a short path (e.g. /usr/poky) as chrpath cannot insert an
+ # rpath longer than that which is already set.
+ else:
+ depth = fpath.rpartition(tmpdir)[2].count('/')
+ libpath = rpath.partition(tmpdir)[2].strip()
+
+ base = "$ORIGIN"
+ while depth > 1:
+ base += "/.."
+ depth-=1
+ new_rpaths.append("%s%s" % (base, libpath))
+
+ # if we have modified some rpaths call chrpath to update the binary
+ if len(new_rpaths):
+ args = ":".join(new_rpaths)
+ #bb.note("Setting rpath for %s to %s" %(fpath, args))
+ sub.call([cmd, '-r', args, fpath])
+
+ if perms:
+ os.chmod(fpath, perms)
+
+def rpath_replace (path, d):
+ bindirs = d.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}").split()
+
+ for bindir in bindirs:
+ #bb.note ("Processing directory " + bindir)
+ directory = path + "/" + bindir
+ process_dir (directory, d)
+
+python relocatable_binaries_preprocess() {
+ rpath_replace(d.expand('${SYSROOT_DESTDIR}'), d)
+}
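The depth computation above turns an absolute RPATH into one relative to the binary's own location via $ORIGIN, so the sysroot stays relocatable. The same arithmetic as a standalone sketch (hypothetical paths):

    import os

    def origin_relative_rpath(binary, libdir, root):
        # number of directory levels between the binary and the sysroot root
        depth = os.path.relpath(os.path.dirname(binary), root).count(os.sep) + 1
        return "$ORIGIN" + "/.." * depth + "/" + os.path.relpath(libdir, root)

    print(origin_relative_rpath("/sysroot/usr/bin/foo", "/sysroot/usr/lib", "/sysroot"))
    # -> $ORIGIN/../../usr/lib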
diff --git a/meta/classes/rm_work.bbclass b/meta/classes/rm_work.bbclass
index 856914821..997dcd18e 100644
--- a/meta/classes/rm_work.bbclass
+++ b/meta/classes/rm_work.bbclass
@@ -6,6 +6,10 @@
# INHERIT += "rm_work"
#
+# Use the completion scheduler by default when rm_work is active
+# to try and reduce disk usage
+BB_SCHEDULER ?= "completion"
+
RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
BB_DEFAULT_TASK = "rm_work_all"
@@ -15,10 +19,54 @@ do_rm_work () {
do
if [ `basename ${S}` = $dir ]; then
rm -rf $dir
- elif [ $dir != 'temp' ]; then
+ # The package and packages-split directories are retained by sstate for
+ # do_package so we retain them here too. Anything in sstate 'plaindirs'
+ # should be retained. Also retain logs and other files in temp.
+ elif [ $dir != 'temp' ] && [ $dir != 'package' ] && [ $dir != 'packages-split' ]; then
rm -rf $dir
fi
done
+ # Need to add pseudo back or subsequent work in this workdir
+ # might fail since setscene may not rerun to recreate it
+ mkdir ${WORKDIR}/pseudo/
+
+ # Change normal stamps into setscene stamps as they better reflect the
+ # fact WORKDIR is now empty
+ # Also leave noexec stamps since setscene stamps don't cover them
+ cd `dirname ${STAMP}`
+ for i in `basename ${STAMP}`*
+ do
+ for j in ${SSTATETASKS}
+ do
+ case $i in
+ *do_setscene*)
+ break
+ ;;
+ *sigdata*)
+ i=dummy
+ break
+ ;;
+ *do_package_write*)
+ i=dummy
+ break
+ ;;
+ *do_build*)
+ i=dummy
+ break
+ ;;
+ *_setscene*)
+ i=dummy
+ break
+ ;;
+ *$j|*$j.*)
+ mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
+ i=dummy
+ break
+ ;;
+ esac
+ done
+ rm -f $i
+ done
}
addtask rm_work after do_${RMWORK_ORIG_TASK}
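The stamp loop above keeps stamps whose backing data survives rm_work (sstate-backed, sigdata, package_write, build, existing setscene stamps) and converts stamps of sstate tasks into their _setscene equivalents; everything else is removed. Roughly, per stamp name (a sketch under those assumptions, not the exact shell logic):

    def rework_stamp(stamp, sstate_tasks):
        # setscene and sigdata stamps are left alone by the shell loop
        if "_setscene" in stamp or "sigdata" in stamp:
            return stamp
        for task in sstate_tasks:
            if task in stamp:
                return stamp.replace(task, task + "_setscene")
        return None  # anything else gets deleted

    print(rework_stamp("busybox-1.0-r0.do_populate_sysroot",
                       ["do_package", "do_populate_sysroot"]))
    # -> busybox-1.0-r0.do_populate_sysroot_setscene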
diff --git a/meta/classes/rootfs_deb.bbclass b/meta/classes/rootfs_deb.bbclass
index 705b363d7..6c9767f98 100644
--- a/meta/classes/rootfs_deb.bbclass
+++ b/meta/classes/rootfs_deb.bbclass
@@ -2,158 +2,122 @@
# Copyright 2006-2007 Openedhand Ltd.
#
-ROOTFS_PKGMANAGE = "run-postinsts dpkg"
+ROOTFS_PKGMANAGE = "run-postinsts dpkg apt"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
-do_rootfs[depends] += "dpkg-native:do_populate_staging apt-native:do_populate_staging"
+do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb"
+DEB_POSTPROCESS_COMMANDS = "rootfs_install_all_locales; "
+
+opkglibdir = "${localstatedir}/lib/opkg"
+
+deb_package_setflag() {
+ sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/lib/dpkg/status
+}
+
+deb_package_getflag() {
+ cat ${IMAGE_ROOTFS}/var/lib/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
+}
+
fakeroot rootfs_deb_do_rootfs () {
set +e
- mkdir -p ${IMAGE_ROOTFS}/var/dpkg/info
- mkdir -p ${IMAGE_ROOTFS}/var/dpkg/updates
- rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
- rm -f ${STAGING_ETCDIR_NATIVE}/apt/preferences
- > ${IMAGE_ROOTFS}/var/dpkg/status
- > ${IMAGE_ROOTFS}/var/dpkg/available
- mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
+ mkdir -p ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
- priority=1
- for arch in ${PACKAGE_ARCHS}; do
- if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
- continue;
- fi
- cd ${DEPLOY_DIR_DEB}/$arch
- # if [ -z "${DEPLOY_KEEP_PACKAGES}" ]; then
- rm -f Packages.gz Packages Packages.bz2
- # fi
- dpkg-scanpackages . | bzip2 > Packages.bz2
- echo "Label: $arch" > Release
-
- echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
- (echo "Package: *"
- echo "Pin: release l=$arch"
- echo "Pin-Priority: $((800 + $priority))"
- echo) >> ${STAGING_ETCDIR_NATIVE}/apt/preferences
- priority=$(expr $priority + 5)
- done
+ # update index
+ package_update_index_deb
- tac ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev > ${STAGING_ETCDIR_NATIVE}/apt/sources.list
+ #install packages
+ export INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
+ export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
+ export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
+ export INSTALL_PACKAGES_NORMAL_DEB="${PACKAGE_INSTALL}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_DEB="${LINGUAS_INSTALL}"
+ export INSTALL_TASK_DEB="rootfs"
- cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
- | sed -e 's#Architecture ".*";#Architecture "${DPKG_ARCH}";#' \
- > "${STAGING_ETCDIR_NATIVE}/apt/apt-rootfs.conf"
+ package_install_internal_deb
+ ${DEB_POSTPROCESS_COMMANDS}
- export APT_CONFIG="${STAGING_ETCDIR_NATIVE}/apt/apt-rootfs.conf"
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
- apt-get update
-
- _flag () {
- sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/dpkg/status
- }
- _getflag () {
- cat ${IMAGE_ROOTFS}/var/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
- }
-
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- for i in ${LINGUAS_INSTALL}; do
- apt-get install $i --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- done
- fi
- fi
-
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- for i in ${PACKAGE_INSTALL}; do
- apt-get install $i --force-yes --allow-unauthenticated
- if [ $? -ne 0 ]; then
- exit 1
- fi
- done
- fi
-
- rm ${WORKDIR}/temp/log.do_$target-attemptonly.${PID}
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- for i in ${PACKAGE_INSTALL_ATTEMPTONLY}; do
- apt-get install $i --force-yes --allow-unauthenticated >> ${WORKDIR}/temp/log.do_rootfs-attemptonly.${PID} || true
- done
- fi
-
- find ${IMAGE_ROOTFS} -name \*.dpkg-new | for i in `cat`; do
- mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
- done
-
- install -d ${IMAGE_ROOTFS}/${sysconfdir}
- echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
-
- # Mark all packages installed
- sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${IMAGE_ROOTFS}/var/dpkg/status
-
# Attempt to run preinsts
# Mark packages with preinst failures as unpacked
- for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.preinst; do
+ for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.preinst; do
if [ -f $i ] && ! sh $i; then
- _flag unpacked `basename $i .preinst`
+ deb_package_setflag unpacked `basename $i .preinst`
fi
done
# Attempt to run postinsts
# Mark packages with postinst failures as unpacked
- for i in ${IMAGE_ROOTFS}/var/dpkg/info/*.postinst; do
+ for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.postinst; do
if [ -f $i ] && ! sh $i configure; then
- _flag unpacked `basename $i .postinst`
+ deb_package_setflag unpacked `basename $i .postinst`
fi
done
set -e
+ install -d ${IMAGE_ROOTFS}/${sysconfdir}
+ echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
+
# Hacks to allow opkg's update-alternatives and opkg to coexist for now
- mkdir -p ${IMAGE_ROOTFS}/usr/lib/opkg
- if [ -e ${IMAGE_ROOTFS}/var/dpkg/alternatives ]; then
- rmdir ${IMAGE_ROOTFS}/var/dpkg/alternatives
+ mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
+ if [ -e ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives ]; then
+ rmdir ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
fi
- ln -s /usr/lib/opkg/alternatives ${IMAGE_ROOTFS}/var/dpkg/alternatives
- ln -s /var/dpkg/info ${IMAGE_ROOTFS}/usr/lib/opkg/info
- ln -s /var/dpkg/status ${IMAGE_ROOTFS}/usr/lib/opkg/status
+ ln -s ${opkglibdir}/alternatives ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
+ ln -s /var/lib/dpkg/info ${IMAGE_ROOTFS}${opkglibdir}/info
+ ln -s /var/lib/dpkg/status ${IMAGE_ROOTFS}${opkglibdir}/status
${ROOTFS_POSTPROCESS_COMMAND}
- log_check rootfs
+ log_check rootfs
+}
+
+remove_packaging_data_files() {
+ rm -rf ${IMAGE_ROOTFS}${opkglibdir}
+ rm -rf ${IMAGE_ROOTFS}/usr/dpkg/
}
-rootfs_deb_log_check() {
- target="$1"
- lf_path="$2"
+DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg --admindir=${IMAGE_ROOTFS}/var/lib/dpkg"
- lf_txt="`cat $lf_path`"
- for keyword_die in "E:"
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- true
+list_installed_packages() {
+ ${DPKG_QUERY_COMMAND} -l | grep ^ii | awk '{ print $2 }'
}
-remove_packaging_data_files() {
- rm -rf ${IMAGE_ROOTFS}/usr/lib/opkg/
- rm -rf ${IMAGE_ROOTFS}/usr/dpkg/
+get_package_filename() {
+ fullname=`find ${DEPLOY_DIR_DEB} -name "$1_*.deb" || true`
+ if [ "$fullname" = "" ] ; then
+ echo $1
+ else
+ echo $fullname
+ fi
+}
+
+list_package_depends() {
+ ${DPKG_QUERY_COMMAND} -s $1 | grep ^Depends | sed -e 's/^Depends: //' -e 's/,//g' -e 's:([=<>]* [^ )]*)::g'
+}
+
+list_package_recommends() {
+ ${DPKG_QUERY_COMMAND} -s $1 | grep ^Recommends | sed -e 's/^Recommends: //' -e 's/,//g' -e 's:([=<>]* [^ )]*)::g'
+}
+
+rootfs_check_package_exists() {
+ if [ `apt-cache showpkg $1 | wc -l` -gt 2 ]; then
+ echo $1
+ fi
+}
+
+rootfs_install_packages() {
+ ${STAGING_BINDIR_NATIVE}/apt-get install $@ --force-yes --allow-unauthenticated
+
+ for pkg in $@ ; do
+ deb_package_setflag installed $pkg
+ done
}
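deb_package_setflag edits the Status line that immediately follows the matching "Package:" header in dpkg's status file (that is what sed's "n" command selects). The same edit expressed in Python (assuming the stanza layout shown):

    import re

    def set_flag(status_text, package, flag):
        # rewrite "Status: install ok <state>" directly after "Package: <pkg>"
        pat = r"(^Package: %s\nStatus: install ok )\S+" % re.escape(package)
        return re.sub(pat, r"\g<1>" + flag, status_text, flags=re.M)

    print(set_flag("Package: busybox\nStatus: install ok unpacked\n",
                   "busybox", "installed"))
    # -> Package: busybox
    #    Status: install ok installed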
diff --git a/meta/classes/rootfs_ipk.bbclass b/meta/classes/rootfs_ipk.bbclass
index 66d3712b8..15800864a 100644
--- a/meta/classes/rootfs_ipk.bbclass
+++ b/meta/classes/rootfs_ipk.bbclass
@@ -5,17 +5,24 @@
# See image.bbclass for a usage of this.
#
-ROOTFS_PKGMANAGE = "opkg opkg-collateral"
+EXTRAOPKGCONFIG ?= ""
+ROOTFS_PKGMANAGE = "opkg opkg-collateral ${EXTRAOPKGCONFIG}"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
-do_rootfs[depends] += "opkg-native:do_populate_staging opkg-utils-native:do_populate_staging"
+do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_ipk"
-IPKG_ARGS = "-f ${IPKGCONF_TARGET} -o ${IMAGE_ROOTFS}"
+IPKG_ARGS = "-f ${IPKGCONF_TARGET} -o ${IMAGE_ROOTFS} --force-overwrite"
OPKG_PREPROCESS_COMMANDS = "package_update_index_ipk; package_generate_ipkg_conf"
-OPKG_POSTPROCESS_COMMANDS = "ipk_insert_feed_uris"
+OPKG_POSTPROCESS_COMMANDS = "ipk_insert_feed_uris; rootfs_install_all_locales; "
+
+opkglibdir = "${localstatedir}/lib/opkg"
+
+# Which packages to not install on the basis of a recommendation
+BAD_RECOMMENDATIONS ?= ""
+MULTILIBRE_ALLOW_REP = "${opkglibdir}"
fakeroot rootfs_ipk_do_rootfs () {
set -x
@@ -26,81 +33,78 @@ fakeroot rootfs_ipk_do_rootfs () {
${OPKG_PREPROCESS_COMMANDS}
mkdir -p ${T}/
- mkdir -p ${IMAGE_ROOTFS}/usr/lib/opkg/
+
+ STATUS=${IMAGE_ROOTFS}${opkglibdir}/status
+ mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
opkg-cl ${IPKG_ARGS} update
- # Uclibc builds don't provide this stuff...
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- for i in ${LINGUAS_INSTALL}; do
- opkg-cl ${IPKG_ARGS} install $i
- done
+ # prime the status file with bits that we don't want
+ for i in ${BAD_RECOMMENDATIONS}; do
+ pkginfo="`opkg-cl ${IPKG_ARGS} info $i`"
+ if [ ! -z "$pkginfo" ]; then
+ echo "$pkginfo" | grep -e '^Package:' -e '^Architecture:' -e '^Version:' >> $STATUS
+ echo "Status: deinstall hold not-installed" >> $STATUS
+ echo >> $STATUS
+ else
+ echo "Requested ignored recommendation $i is not a package"
fi
- fi
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- opkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL}
- fi
+ done
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- opkg-cl ${IPKG_ARGS} install ${PACKAGE_INSTALL_ATTEMPTONLY} > "${WORKDIR}/temp/log.do_rootfs_attemptonly.${PID}" || true
- fi
+ #install
+ export INSTALL_PACKAGES_ATTEMPTONLY_IPK="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_IPK="${LINGUAS_INSTALL}"
+ export INSTALL_TASK_IPK="rootfs"
+
+ export INSTALL_ROOTFS_IPK="${IMAGE_ROOTFS}"
+ export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
+ export INSTALL_PACKAGES_IPK="${PACKAGE_INSTALL}"
+ #post install
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
export OPKG_OFFLINE_ROOT=${IPKG_OFFLINE_ROOT}
- mkdir -p ${IMAGE_ROOTFS}/etc/opkg/
- grep "^arch" ${IPKGCONF_TARGET} >${IMAGE_ROOTFS}/etc/opkg/arch.conf
+ package_install_internal_ipk
+
+ # Distro specific packages should create this
+ #mkdir -p ${IMAGE_ROOTFS}/etc/opkg/
+ #grep "^arch" ${IPKGCONF_TARGET} >${IMAGE_ROOTFS}/etc/opkg/arch.conf
${OPKG_POSTPROCESS_COMMANDS}
${ROOTFS_POSTINSTALL_COMMAND}
- for i in ${IMAGE_ROOTFS}${libdir}/opkg/info/*.preinst; do
- if [ -f $i ] && ! sh $i; then
- opkg-cl ${IPKG_ARGS} flag unpacked `basename $i .preinst`
+ if ${@base_contains("IMAGE_FEATURES", "read-only-rootfs", "true", "false" ,d)}; then
+ if grep Status:.install.ok.unpacked ${STATUS}; then
+ echo "Some packages could not be configured offline and rootfs is read-only."
+ exit 1
fi
- done
- for i in ${IMAGE_ROOTFS}${libdir}/opkg/info/*.postinst; do
- if [ -f $i ] && ! sh $i configure; then
- opkg-cl ${IPKG_ARGS} flag unpacked `basename $i .postinst`
- fi
- done
+ fi
install -d ${IMAGE_ROOTFS}/${sysconfdir}
echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
${ROOTFS_POSTPROCESS_COMMAND}
- rm -f ${IMAGE_ROOTFS}${libdir}/opkg/lists/*
-
- log_check rootfs
-}
+ rm -f ${IMAGE_ROOTFS}${opkglibdir}/lists/*
+ if ${@base_contains("IMAGE_FEATURES", "package-management", "false", "true", d)}; then
+ if ! grep Status:.install.ok.unpacked ${STATUS}; then
+ # All packages were successfully configured.
+ # update-rc.d, base-passwd are no further use, remove them now
+ opkg-cl ${IPKG_ARGS} --force-depends remove update-rc.d base-passwd || true
-rootfs_ipk_log_check() {
- target="$1"
- lf_path="$2"
-
- lf_txt="`cat $lf_path`"
- for keyword_die in "Cannot find package" "exit 1" ERR Fail
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
+ # Also delete the status files
+ rm -rf ${IMAGE_ROOTFS}${opkglibdir}
fi
- done
- test "$do_exit" = 1 && exit 1
- true
+ fi
+ set +x
+ log_check rootfs
}
rootfs_ipk_write_manifest() {
manifest=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest
- cp ${IMAGE_ROOTFS}/usr/lib/opkg/status $manifest
+ cp ${IMAGE_ROOTFS}${opkglibdir}/status $manifest
sed '/Depends/d' -i $manifest
sed '/Status/d' -i $manifest
@@ -113,29 +117,47 @@ rootfs_ipk_write_manifest() {
}
remove_packaging_data_files() {
- rm -rf ${IMAGE_ROOTFS}/usr/lib/opkg/
+ rm -rf ${IMAGE_ROOTFS}${opkglibdir}
+ # We need the directory for the package manager lock
+ mkdir ${IMAGE_ROOTFS}${opkglibdir}
}
-install_all_locales() {
+list_installed_packages() {
+ grep ^Package: ${IMAGE_ROOTFS}${opkglibdir}/status | sed "s/^Package: //"
+}
- PACKAGES_TO_INSTALL=""
+get_package_filename() {
+ set +x
+ info=`opkg-cl ${IPKG_ARGS} info $1 | grep -B 7 -A 7 "^Status.* \(\(installed\)\|\(unpacked\)\)" || true`
+ name=`echo "${info}" | awk '/^Package/ {printf $2"_"}'`
+ name=$name`echo "${info}" | awk -F: '/^Version/ {printf $NF"_"}' | sed 's/^\s*//g'`
+ name=$name`echo "${info}" | awk '/^Archi/ {print $2".ipk"}'`
+ set -x
- INSTALLED_PACKAGES=`grep ^Package: ${IMAGE_ROOTFS}${libdir}/opkg/status |sed "s/^Package: //"|egrep -v -- "(-locale-|-dev$|-doc$|^kernel|^glibc|^ttf|^task|^perl|^python)"`
+ fullname=`find ${DEPLOY_DIR_IPK} -name "$name" || true`
+ if [ "$fullname" = "" ] ; then
+ echo $name
+ else
+ echo $fullname
+ fi
+}
+
+list_package_depends() {
+ opkg-cl ${IPKG_ARGS} info $1 | grep ^Depends | sed -e 's/^Depends: //' -e 's/,//g' -e 's:([=<>]* [^ )]*)::g'
+}
- for pkg in $INSTALLED_PACKAGES
- do
- for lang in ${IMAGE_LOCALES}
- do
- if [ `opkg-cl ${IPKG_ARGS} info $pkg-locale-$lang | wc -l` -gt 2 ]
- then
- PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL $pkg-locale-$lang"
- fi
- done
- done
- if [ "$PACKAGES_TO_INSTALL" != "" ]
- then
- opkg-cl ${IPKG_ARGS} install $PACKAGES_TO_INSTALL
- fi
+list_package_recommends() {
+ opkg-cl ${IPKG_ARGS} info $1 | grep ^Recommends | sed -e 's/^Recommends: //' -e 's/,//g' -e 's:([=<>]* [^ )]*)::g'
+}
+
+rootfs_check_package_exists() {
+ if [ `opkg-cl ${IPKG_ARGS} info $1 | wc -l` -gt 2 ]; then
+ echo $1
+ fi
+}
+
+rootfs_install_packages() {
+ opkg-cl ${IPKG_ARGS} install $@
}
ipk_insert_feed_uris () {
@@ -153,18 +175,29 @@ ipk_insert_feed_uris () {
# insert new feed-sources
echo "src/gz $feed_name $feed_uri" >> ${IPKGCONF_TARGET}
- done
+ done
+
+ # Allow use of the package deploy directory contents as a quick devel-testing
+ # feed. This creates individual feed configs for each arch subdir of those
+ # specified as compatible for the current machine.
+ # NOTE: Development-helper feature, NOT a full-fledged feed.
+ if [ -n "${FEED_DEPLOYDIR_BASE_URI}" ]; then
+ for arch in ${PACKAGE_ARCHS}
+ do
+ echo "src/gz local-$arch ${FEED_DEPLOYDIR_BASE_URI}/$arch" >> ${IMAGE_ROOTFS}/etc/opkg/local-$arch-feed.conf
+ done
+ fi
}
python () {
- import bb
- if bb.data.getVar('BUILD_IMAGES_FROM_FEEDS', d, True):
- flags = bb.data.getVarFlag('do_rootfs', 'recrdeptask', d)
+
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_ipk", "")
flags = flags.replace("do_deploy", "")
- flags = flags.replace("do_populate_staging", "")
- bb.data.setVarFlag('do_rootfs', 'recrdeptask', flags, d)
- bb.data.setVar('OPKG_PREPROCESS_COMMANDS', "package_generate_archlist\nipk_insert_feed_uris", d)
- bb.data.setVar('OPKG_POSTPROCESS_COMMANDS', '', d)
+ flags = flags.replace("do_populate_sysroot", "")
+ d.setVarFlag('do_rootfs', 'recrdeptask', flags)
+ d.setVar('OPKG_PREPROCESS_COMMANDS', "package_generate_archlist\nipk_insert_feed_uris")
+ d.setVar('OPKG_POSTPROCESS_COMMANDS', '')
}
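For each BAD_RECOMMENDATIONS entry, the rootfs loop above copies the package's Package/Architecture/Version fields into the opkg status file and pins it as "deinstall hold not-installed", so opkg records the recommendation as deliberately absent rather than installing it. The generated stanza looks roughly like this (hypothetical package):

    Package: example-doc
    Architecture: armv5te
    Version: 1.0-r0
    Status: deinstall hold not-installed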
diff --git a/meta/classes/rootfs_rpm.bbclass b/meta/classes/rootfs_rpm.bbclass
index 1e8ad6d9e..9039b21a0 100644
--- a/meta/classes/rootfs_rpm.bbclass
+++ b/meta/classes/rootfs_rpm.bbclass
@@ -2,170 +2,129 @@
# Creates a root filesystem out of rpm packages
#
-ROOTFS_PKGMANAGE = "rpm yum"
+ROOTFS_PKGMANAGE = "rpm zypper"
-ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
+# Add 50Meg of extra space for zypper database space
+IMAGE_ROOTFS_EXTRA_SPACE_append = "${@base_contains("PACKAGE_INSTALL", "zypper", " + 51200", "" ,d)}"
-do_rootfs[depends] += "rpm-native:do_populate_staging yum-native:do_populate_staging createrepo-native:do_populate_staging fakechroot-native:do_populate_staging"
+# Postinstalls on device are handled within this class at present
+ROOTFS_PKGMANAGE_BOOTSTRAP = ""
-# Needed for update-alternatives
-do_rootfs[depends] += "opkg-native:do_populate_staging"
-
-do_rootfs[recrdeptask] += "do_package_write_rpm"
+do_rootfs[depends] += "rpm-native:do_populate_sysroot"
-YUMCONF = "${IMAGE_ROOTFS}/etc/yum.conf"
-YUMARGS = "--disablerepo=* --enablerepo=poky-feed-* --installroot ${IMAGE_ROOTFS}"
-export YUM_ARCH_FORCE = "${TARGET_ARCH}"
-
-AWKPOSTINSTSCRIPT = "${STAGING_BINDIR_NATIVE}/extract-postinst.awk"
+# Needed for update-alternatives
+do_rootfs[depends] += "opkg-native:do_populate_sysroot"
-RPM_PREPROCESS_COMMANDS = ""
-RPM_POSTPROCESS_COMMANDS = "rpm_insert_feeds_uris"
+# Creating the repo info in do_rootfs
+#do_rootfs[depends] += "createrepo-native:do_populate_sysroot"
-rpm_insert_feeds_uris () {
+do_rootfs[recrdeptask] += "do_package_write_rpm"
- echo "Building from feeds activated!"
+RPM_PREPROCESS_COMMANDS = "package_update_index_rpm; package_generate_rpm_conf; "
+RPM_POSTPROCESS_COMMANDS = "rootfs_install_all_locales; "
- mkdir -p ${IMAGE_ROOTFS}/etc/yum/repos.d/
- for line in ${RPM_FEED_URIS}
- do
- # strip leading and trailing spaces/tabs, then split into name and uri
- line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
- feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
- feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
+#
+# Allow distributions to alter when [postponed] package install scripts are run
+#
+POSTINSTALL_INITPOSITION ?= "98"
- echo "Added $feed_name feed with URL $feed_uri"
+rpmlibdir = "/var/lib/rpm"
+opkglibdir = "${localstatedir}/lib/opkg"
- FEED_FILE=${IMAGE_ROOTFS}/etc/yum/repos.d/$feed_name
+RPMOPTS="--dbpath ${rpmlibdir} --define='_openall_before_chroot 1'"
+RPM="rpm ${RPMOPTS}"
- echo "[poky-feed-$feed_name]" >> $FEED_FILE
- echo "name = $feed_name" >> $FEED_FILE
- echo "baseurl = $feed_uri" >> $FEED_FILE
- echo "gpgcheck = 0" >> $FEED_FILE
- done
-}
+# RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files
+# in ${DEPLOY_DIR_RPM}. This can be removed if package_update_index_rpm can be called concurrently
+do_rootfs[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
fakeroot rootfs_rpm_do_rootfs () {
- set -x
-
- ${RPM_PREPROCESS_COMMANDS}
-
- mkdir -p ${IMAGE_ROOTFS}/etc/rpm/
- echo "${TARGET_ARCH}-linux" >${IMAGE_ROOTFS}/etc/rpm/platform
+ set +x
- # Generate an apprpriate yum.conf
- rm -rf ${YUMCONF}
- cat > ${YUMCONF} << EOF
-[main]
-cachedir=/var/cache2/yum
-keepcache=1
-debuglevel=10
-logfile=/var/log2/yum.log
-exactarch=0
-obsoletes=1
-tolerant=1
+ ${RPM_PREPROCESS_COMMANDS}
-EOF
+ #createrepo "${DEPLOY_DIR_RPM}"
- #priority=1
- mkdir -p ${IMAGE_ROOTFS}${DEPLOY_DIR_RPM}
+ # install packages
+ # This needs to work in the same way as populate_sdk_rpm.bbclass!
+ export INSTALL_ROOTFS_RPM="${IMAGE_ROOTFS}"
+ export INSTALL_PLATFORM_RPM="${TARGET_ARCH}"
+ export INSTALL_CONFBASE_RPM="${RPMCONF_TARGET_BASE}"
+ export INSTALL_PACKAGES_RPM="${PACKAGE_INSTALL}"
+ export INSTALL_PACKAGES_ATTEMPTONLY_RPM="${PACKAGE_INSTALL_ATTEMPTONLY}"
+ export INSTALL_PACKAGES_LINGUAS_RPM="${LINGUAS_INSTALL}"
+ export INSTALL_PROVIDENAME_RPM=""
+ export INSTALL_TASK_RPM="rootfs_rpm_do_rootfs"
- for arch in ${PACKAGE_ARCHS}; do
- if [ ! -d ${DEPLOY_DIR_RPM}/$arch ]; then
- continue;
- fi
- createrepo ${DEPLOY_DIR_RPM}/$arch
+ # Setup base system configuration
+ mkdir -p ${INSTALL_ROOTFS_RPM}/etc/rpm/
- echo "[poky-feed-$arch]" >> ${YUMCONF}
- echo "name = Poky RPM $arch Feed" >> ${YUMCONF}
- echo "baseurl=file://${DEPLOY_DIR_RPM}/$arch" >> ${YUMCONF}
- echo "gpgcheck=0" >> ${YUMCONF}
- echo "" >> ${YUMCONF}
- #priority=$(expr $priority + 5)
-
- # Copy the packages into the target image
- # Ugly ugly ugly but rpm is braindead and can't see outside the chroot
- # when installing :(
- cp -r ${DEPLOY_DIR_RPM}/$arch ${IMAGE_ROOTFS}${DEPLOY_DIR_RPM}/
- done
+ mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}
+ mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}/log
+ # After changing the __db.* cache size, the log file is no longer
+ # generated automatically; that raises warnings, so touch a bare
+ # log file for rpm to write into.
+ touch ${INSTALL_ROOTFS_RPM}${rpmlibdir}/log/log.0000000001
+ cat > ${INSTALL_ROOTFS_RPM}${rpmlibdir}/DB_CONFIG << EOF
+# ================ Environment
+set_data_dir .
+set_create_dir .
+set_lg_dir ./log
+set_tmp_dir ./tmp
+# -- thread_count must be >= 8
+set_thread_count 64
- #mkdir -p ${IMAGE_ROOTFS}/var/lib/rpm
- #rpm --root ${IMAGE_ROOTFS} --initdb
- #rpm --root ${IMAGE_ROOTFS} --dbpath ${IMAGE_ROOTFS}/var/lib/rpm -ihv --nodeps --ignoreos
- #rpm -ihv --root ${IMAGE_ROOTFS} ${PACKAGE_INSTALL}
+# ================ Logging
- #package_update_index_rpm
- #package_generate_ipkg_conf
+# ================ Memory Pool
+set_mp_mmapsize 268435456
- # Uclibc builds don't provide this stuff...
- if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
- if [ ! -z "${LINGUAS_INSTALL}" ]; then
- for i in ${LINGUAS_INSTALL}; do
- fakechroot yum ${YUMARGS} -y install $i
- done
- fi
- fi
- if [ ! -z "${PACKAGE_INSTALL}" ]; then
- fakechroot yum ${YUMARGS} -y install ${PACKAGE_INSTALL}
- fi
-
- if [ ! -z "${PACKAGE_INSTALL_ATTEMPTONLY}" ]; then
- fakechroot yum ${YUMARGS} -y install ${PACKAGE_INSTALL_ATTEMPTONLY} > ${WORKDIR}/temp/log.do_rootfs-attemptonly.${PID} || true
- fi
+# ================ Locking
+set_lk_max_locks 16384
+set_lk_max_lockers 16384
+set_lk_max_objects 16384
+mutex_set_max 163840
- # Add any recommended packages to the image
- # (added as an extra script since yum itself doesn't support this)
- yum-install-recommends.py ${IMAGE_ROOTFS} "fakechroot yum ${YUMARGS} -y install"
+# ================ Replication
+EOF
- # Symlinks created under fakeroot are wrong, now we have to fix them...
- cd ${IMAGE_ROOTFS}
- for f in `find . -type l -print`
- do
- link=`readlink $f | sed -e 's#${IMAGE_ROOTFS}##'`
- rm $f
- ln -s $link $f
+ # List must be ordered from most preferred to least preferred
+ INSTALL_PLATFORM_EXTRA_RPM=""
+ for each_arch in ${MULTILIB_PACKAGE_ARCHS} ${PACKAGE_ARCHS}; do
+ INSTALL_PLATFORM_EXTRA_RPM="$each_arch $INSTALL_PLATFORM_EXTRA_RPM"
done
+ export INSTALL_PLATFORM_RPM
+
+ package_install_internal_rpm
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
- #mkdir -p ${IMAGE_ROOTFS}/etc/opkg/
- #grep "^arch" ${IPKGCONF_TARGET} >${IMAGE_ROOTFS}/etc/opkg/arch.conf
-
${ROOTFS_POSTINSTALL_COMMAND}
- mkdir -p ${IMAGE_ROOTFS}/etc/rpm-postinsts/
- rpm --root ${IMAGE_ROOTFS} -aq --queryformat 'Name: %{NAME}\n' --scripts > ${IMAGE_ROOTFS}/etc/rpm-postinsts/combined
- awk -f ${AWKPOSTINSTSCRIPT} < ${IMAGE_ROOTFS}/etc/rpm-postinsts/combined
- rm ${IMAGE_ROOTFS}/etc/rpm-postinsts/combined
-
- for i in ${IMAGE_ROOTFS}/etc/rpm-postinsts/*.sh; do
- if [ -f $i ] && sh $i; then
- # rm $i
- mv $i $i.done
- fi
+ # Report delayed package scriptlets
+ for i in ${IMAGE_ROOTFS}/etc/rpm-postinsts/*; do
+ echo "Delayed package scriptlet: `head -n 3 $i | tail -n 1`"
done
install -d ${IMAGE_ROOTFS}/${sysconfdir}/rcS.d
# Stop $i getting expanded below...
i=\$i
- cat > ${IMAGE_ROOTFS}${sysconfdir}/rcS.d/S98configure << EOF
+ cat > ${IMAGE_ROOTFS}${sysconfdir}/rcS.d/S${POSTINSTALL_INITPOSITION}run-postinsts << EOF
#!/bin/sh
-for i in /etc/rpm-postinsts/*.sh; do
+for i in /etc/rpm-postinsts/*; do
echo "Running postinst $i..."
- if [ -f $i ] && sh $i; then
- # rm $i
- mv $i $i.done
+ if [ -f $i ] && $i; then
+ rm $i
else
echo "ERROR: postinst $i failed."
fi
done
-rm -f ${sysconfdir}/rcS.d/S98configure
+rm -f ${sysconfdir}/rcS.d/S${POSTINSTALL_INITPOSITION}run-postinsts
EOF
- chmod 0755 ${IMAGE_ROOTFS}${sysconfdir}/rcS.d/S98configure
+ chmod 0755 ${IMAGE_ROOTFS}${sysconfdir}/rcS.d/S${POSTINSTALL_INITPOSITION}run-postinsts
install -d ${IMAGE_ROOTFS}/${sysconfdir}
echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
@@ -176,71 +135,109 @@ EOF
rm -rf ${IMAGE_ROOTFS}/var/cache2/
rm -rf ${IMAGE_ROOTFS}/var/run2/
rm -rf ${IMAGE_ROOTFS}/var/log2/
- rm -rf ${IMAGE_ROOTFS}${DEPLOY_DIR_RPM}/
# remove lock files
- rm -f ${IMAGE_ROOTFS}/var/lib/rpm/__db.*
+ rm -f ${IMAGE_ROOTFS}${rpmlibdir}/__db.*
- # remove no longer used yum.conf
- rm -f ${IMAGE_ROOTFS}/etc/yum.conf
+ # Move manifests into the directory with the logs
+ mv ${IMAGE_ROOTFS}/install/*.manifest ${T}/
- log_check rootfs
-}
+ # Remove all remaining resolver files
+ rm -rf ${IMAGE_ROOTFS}/install
-rootfs_rpm_log_check() {
- target="$1"
- lf_path="$2"
+ log_check rootfs
- lf_txt="`cat $lf_path`"
- for keyword_die in "Cannot find package" "exit 1" ERR Fail
- do
- if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
- then
- echo "log_check: There were error messages in the logfile"
- echo -e "log_check: Matched keyword: [$keyword_die]\n"
- echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
- echo ""
- do_exit=1
- fi
- done
- test "$do_exit" = 1 && exit 1
- true
+ # Workaround so the parser knows we need the resolve_package function!
+ if false ; then
+ resolve_package_rpm foo ${RPMCONF_TARGET_BASE}.conf || true
+ fi
}
remove_packaging_data_files() {
- rm -rf ${IMAGE_ROOTFS}/usr/lib/opkg/
+ rm -rf ${IMAGE_ROOTFS}${rpmlibdir}
+ rm -rf ${IMAGE_ROOTFS}${opkglibdir}
+}
+
+RPM_QUERY_CMD = '${RPM} --root ${IMAGE_ROOTFS} -D "_dbpath ${rpmlibdir}" \
+ -D "__dbi_txn create nofsync private"'
+
+list_installed_packages() {
+ ${RPM_QUERY_CMD} -qa --qf "[%{NAME}\n]"
+}
+
+get_package_filename() {
+ resolve_package_rpm ${RPMCONF_TARGET_BASE}-base_archs.conf $1
}
-install_all_locales() {
+list_package_depends() {
+ pkglist=`list_installed_packages`
+
+ # REQUIRE* lists "soft" requirements (which we know as recommends and RPM refers to
+ # as "suggests") so filter these out with the help of awk
+ for req in `${RPM_QUERY_CMD} -q --qf "[%{REQUIRENAME} %{REQUIREFLAGS}\n]" $1 | awk '{ if( and($2, 0x80000) == 0) print $1 }'`; do
+ if echo "$req" | grep -q "^rpmlib" ; then continue ; fi
- PACKAGES_TO_INSTALL=""
+ realpkg=""
+ for dep in $pkglist; do
+ if [ "$dep" = "$req" ] ; then
+ realpkg="1"
+ echo $req
+ break
+ fi
+ done
- INSTALLED_PACKAGES=`grep ^Package: ${IMAGE_ROOTFS}${libdir}/opkg/status |sed "s/^Package: //"|egrep -v -- "(-locale-|-dev$|-doc$|^kernel|^glibc|^ttf|^task|^perl|^python)"`
+ if [ "$realdep" = "" ] ; then
+ ${RPM_QUERY_CMD} -q --whatprovides $req --qf "%{NAME}\n"
+ fi
+ done
+}
- for pkg in $INSTALLED_PACKAGES
- do
- for lang in ${IMAGE_LOCALES}
- do
- if [ `opkg-cl ${IPKG_ARGS} info $pkg-locale-$lang | wc -l` -gt 2 ]
- then
- PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL $pkg-locale-$lang"
- fi
- done
- done
- if [ "$PACKAGES_TO_INSTALL" != "" ]
- then
- opkg-cl ${IPKG_ARGS} install $PACKAGES_TO_INSTALL
- fi
+list_package_recommends() {
+ ${RPM_QUERY_CMD} -q --suggests $1
+}
+
+rootfs_check_package_exists() {
+ resolve_package_rpm ${RPMCONF_TARGET_BASE}-base_archs.conf $1
+}
+
+rootfs_install_packages() {
+ # The packages installed here are not controlled by
+ # package_install_internal_rpm, so they may already have been
+ # installed (e.g. during the initial rootfs generation); use
+ # '--replacepkgs' so they are always (re)installed
+ for pkg in $@; do
+ ${RPM} --root ${IMAGE_ROOTFS} -D "_dbpath ${rpmlibdir}" \
+ -D "__dbi_txn create nofsync private" \
+ --noscripts --notriggers --noparentdirs --nolinktos \
+ --replacepkgs -Uhv $pkg || true
+ done
}
python () {
- import bb
- if bb.data.getVar('BUILD_IMAGES_FROM_FEEDS', d, True):
- flags = bb.data.getVarFlag('do_rootfs', 'recrdeptask', d)
+ if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
+ flags = d.getVarFlag('do_rootfs', 'recrdeptask')
flags = flags.replace("do_package_write_rpm", "")
flags = flags.replace("do_deploy", "")
- flags = flags.replace("do_populate_staging", "")
- bb.data.setVarFlag('do_rootfs', 'recrdeptask', flags, d)
- bb.data.setVar('RPM_PREPROCESS_COMMANDS', "rpm_insert_feed_uris", d)
- bb.data.setVar('RPM_POSTPROCESS_COMMANDS', '', d)
+ flags = flags.replace("do_populate_sysroot", "")
+ d.setVarFlag('do_rootfs', 'recrdeptask', flags)
+ d.setVar('RPM_PREPROCESS_COMMANDS', '')
+ d.setVar('RPM_POSTPROCESS_COMMANDS', '')
+
+ ml_package_archs = ""
+ ml_prefix_list = ""
+ multilibs = d.getVar('MULTILIBS', True) or ""
+ for ext in multilibs.split():
+ eext = ext.split(':')
+ if len(eext) > 1 and eext[0] == 'multilib':
+ localdata = bb.data.createCopy(d)
+ default_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + eext[1], False)
+ if default_tune:
+ localdata.setVar("DEFAULTTUNE", default_tune)
+ package_archs = localdata.getVar("PACKAGE_ARCHS", True) or ""
+ package_archs = " ".join([i in "all noarch any".split() and i or eext[1]+"_"+i for i in package_archs.split()])
+ ml_package_archs += " " + package_archs
+ ml_prefix_list += " " + eext[1]
+ #bb.note("ML_PACKAGE_ARCHS %s %s %s" % (eext[1], localdata.getVar("PACKAGE_ARCHS", True) or "(none)", overrides))
+ d.setVar('MULTILIB_PACKAGE_ARCHS', ml_package_archs)
+ d.setVar('MULTILIB_PREFIX_LIST', ml_prefix_list)
}
diff --git a/meta/classes/sanity.bbclass b/meta/classes/sanity.bbclass
index 5cf067795..ff3c41301 100644
--- a/meta/classes/sanity.bbclass
+++ b/meta/classes/sanity.bbclass
@@ -2,172 +2,547 @@
# Sanity check the users setup for common misconfigurations
#
+SANITY_REQUIRED_UTILITIES ?= "patch diffstat texi2html makeinfo svn bzip2 tar gzip gawk chrpath wget cpio"
+
def raise_sanity_error(msg):
- import bb
- bb.fatal(""" Poky's config sanity checker detected a potential misconfiguration.
- Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
- Following is the list of potential problems / advisories:
-
- %s""" % msg)
+ bb.fatal(""" OE-core's config sanity checker detected a potential misconfiguration.
+ Either fix the cause of this error or at your own risk disable the checker (see sanity.conf).
+ Following is the list of potential problems / advisories:
+
+ %s""" % msg)
+
+# Check a single tune for validity.
+def check_toolchain_tune(data, tune, multilib):
+ tune_errors = []
+ if not tune:
+ return "No tuning found for %s multilib." % multilib
+ bb.debug(2, "Sanity-checking tuning '%s' (%s) features:" % (tune, multilib))
+ features = (data.getVar("TUNE_FEATURES_tune-%s" % tune, True) or "").split()
+ if not features:
+ return "Tuning '%s' has no defined features, and cannot be used." % tune
+ valid_tunes = data.getVarFlags('TUNEVALID') or {}
+ conflicts = data.getVarFlags('TUNECONFLICTS') or {}
+ # [doc] is the documentation for the variable, not a real feature
+ if 'doc' in valid_tunes:
+ del valid_tunes['doc']
+ if 'doc' in conflicts:
+ del conflicts['doc']
+ for feature in features:
+ if feature in conflicts:
+ for conflict in conflicts[feature].split():
+ if conflict in features:
+ tune_errors.append("Feature '%s' conflicts with '%s'." %
+ (feature, conflict))
+ if feature in valid_tunes:
+ bb.debug(2, " %s: %s" % (feature, valid_tunes[feature]))
+ else:
+ tune_errors.append("Feature '%s' is not defined." % feature)
+ whitelist = data.getVar("TUNEABI_WHITELIST", True) or ''
+ override = data.getVar("TUNEABI_OVERRIDE", True) or ''
+ if whitelist:
+ tuneabi = data.getVar("TUNEABI_tune-%s" % tune, True) or ''
+ if not tuneabi:
+ tuneabi = tune
+ if True not in [x in whitelist.split() for x in tuneabi.split()]:
+ tune_errors.append("Tuning '%s' (%s) cannot be used with any supported tuning/ABI." %
+ (tune, tuneabi))
+ if tune_errors:
+ return "Tuning '%s' has the following errors:\n" + '\n'.join(tune_errors)
+
+def check_toolchain(data):
+ tune_error_set = []
+ deftune = data.getVar("DEFAULTTUNE", True)
+ tune_errors = check_toolchain_tune(data, deftune, 'default')
+ if tune_errors:
+ tune_error_set.append(tune_errors)
+
+ multilibs = (data.getVar("MULTILIB_VARIANTS", True) or "").split()
+ if multilibs:
+ seen_libs = []
+ seen_tunes = []
+ for lib in multilibs:
+ if lib in seen_libs:
+ tune_error_set.append("The multilib '%s' appears more than once." % lib)
+ else:
+ seen_libs.append(lib)
+ tune = data.getVar("DEFAULTTUNE_virtclass-multilib-%s" % lib, True)
+ if tune in seen_tunes:
+ tune_error_set.append("The tuning '%s' appears in more than one multilib." % tune)
+ else:
+ seen_tunes.append(tune)
+ if tune == deftune:
+ tune_error_set.append("Multilib '%s' (%s) is also the default tuning." % (lib, deftune))
+ else:
+ tune_errors = check_toolchain_tune(data, tune, lib)
+ if tune_errors:
+ tune_error_set.append(tune_errors)
+ if tune_error_set:
+ return "Toolchain tunings invalid:\n" + '\n'.join(tune_error_set)
+
+ return ""
def check_conf_exists(fn, data):
- import bb, os
+ bbpath = []
+ fn = data.expand(fn)
+ vbbpath = data.getVar("BBPATH")
+ if vbbpath:
+ bbpath += vbbpath.split(":")
+ for p in bbpath:
+ currname = os.path.join(data.expand(p), fn)
+ if os.access(currname, os.R_OK):
+ return True
+ return False
+
+def check_sanity_sstate_dir_change(sstate_dir, data):
+ # Sanity checks to be done when the value of SSTATE_DIR changes
+
+ testmsg = ""
+ if sstate_dir != "":
+ # Check that the user can read and write to SSTATE_DIR
+ sstatemsg = check_can_read_write_directory(sstate_dir) or None
+ if sstatemsg:
+ sstatemsg = sstatemsg + ". You could try using it as an entry in SSTATE_MIRRORS instead of as SSTATE_DIR.\n"
+ testmsg = testmsg + sstatemsg
+ # Check that SSTATE_DIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ testmsg = testmsg + check_create_long_filename(sstate_dir, "SSTATE_DIR")
+
+ return testmsg
+
+def check_sanity_tmpdir_change(tmpdir, data):
+ # Sanity checks to be done when the value of TMPDIR changes
+
+ # Check that TMPDIR isn't on a filesystem with limited filename length (eg. eCryptFS)
+ testmsg = check_create_long_filename(tmpdir, "TMPDIR")
+ # Check that we can fetch from various network transports
+ testmsg = testmsg + check_connectivity(data)
+ return testmsg
+
+def check_sanity_version_change(data):
+ # Sanity checks to be done when SANITY_VERSION changes
+ return ""
+
+def check_pseudo_wrapper():
+ import sys
+ if not sys.argv[0].endswith('/bitbake'):
+ return ""
+
+ import subprocess as sub
+ # Check if bitbake wrapper is being used
+ pseudo_build = os.environ.get( 'PSEUDO_BUILD' )
+ if not pseudo_build:
+ bb.warn("Bitbake has not been run using the bitbake wrapper (scripts/bitbake); this is likely because your PATH has been altered from that normally set up by the oe-init-build-env script. Not using the wrapper may result in failures during package installation, so it is highly recommended that you set your PATH back so that the wrapper script is being executed.")
+
+ if (not pseudo_build) or pseudo_build == '2':
+ # pseudo ought to be working, let's see if it is...
+ p = sub.Popen(['sh', '-c', 'PSEUDO_DISABLED=0 id -u'],stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ if out.rstrip() != '0':
+ msg = "Pseudo is not functioning correctly, which will cause failures during package installation. Please check your configuration."
+ if pseudo_build == '2':
+ return msg
+ else:
+ bb.warn(msg)
+ return ""
+
+def check_create_long_filename(filepath, pathname):
+ testfile = os.path.join(filepath, ''.join([`num`[-1] for num in xrange(1,200)]))
+ try:
+ if not os.path.exists(filepath):
+ bb.utils.mkdirhier(filepath)
+ f = file(testfile, "w")
+ f.close()
+ os.remove(testfile)
+ except IOError as (errno, strerror):
+ if errno == 36: # ENAMETOOLONG
+ return "Failed to create a file with a long name in %s. Please use a filesystem that does not unreasonably limit filename length.\n" % pathname
+ else:
+ return "Failed to create a file in %s: %s\n" % (pathname, strerror)
+ return ""
+
+def check_can_read_write_directory(directory):
+ if not os.access(directory, os.R_OK|os.W_OK):
+ return "Insufficient permissions for %s" % directory
+ return ""
+
+def check_connectivity(d):
+ # URI's to check can be set in the CONNECTIVITY_CHECK_URIS variable
+ # using the same syntax as for SRC_URI. If the variable is not set
+ # the check is skipped
+ test_uris = (d.getVar('CONNECTIVITY_CHECK_URIS', True) or "").split()
+ retval = ""
+
+ # Only check connectivity if network enabled and the
+ # CONNECTIVITY_CHECK_URIS are set
+ network_enabled = not d.getVar('BB_NO_NETWORK', True)
+ check_enabled = len(test_uris)
+ # Take a copy of the data store and unset MIRRORS and PREMIRRORS
+ data = bb.data.createCopy(d)
+ data.delVar('PREMIRRORS')
+ data.delVar('MIRRORS')
+ if check_enabled and network_enabled:
+ try:
+ fetcher = bb.fetch2.Fetch(test_uris, data)
+ fetcher.checkstatus()
+ except Exception:
+ # Allow the message to be configured so that users can be
+ # pointed to a support mechanism.
+ msg = data.getVar('CONNECTIVITY_CHECK_MSG', True) or ""
+ if len(msg) == 0:
+ msg = "Failed to fetch test data from the network. Please ensure your network is configured correctly.\n"
+ retval = msg
+
+ return retval
+
+def check_supported_distro(e):
+ tested_distros = e.data.getVar('SANITY_TESTED_DISTROS', True)
+ if not tested_distros:
+ return
+
+ if os.path.exists("/etc/redhat-release"):
+ f = open("/etc/redhat-release", "r")
+ try:
+ distro = f.readline().strip()
+ finally:
+ f.close()
+ elif os.path.exists("/etc/SuSE-release"):
+ import re
+ f = open("/etc/SuSE-release", "r")
+ try:
+ distro = f.readline()
+ # Remove the architecture suffix e.g. (i586)
+ distro = re.sub(r' \([a-zA-Z0-9\-_]*\)$', '', distro).strip()
+ finally:
+ f.close()
+ else:
+ # Use LSB method
+ import subprocess as sub
+ try:
+ p = sub.Popen(['lsb_release','-d','-s'],stdout=sub.PIPE,stderr=sub.PIPE)
+ out, err = p.communicate()
+ distro = out.rstrip()
+ except Exception:
+ distro = None
+
+ if not distro:
+ if os.path.exists("/etc/lsb-release"):
+ f = open("/etc/lsb-release", "r")
+ try:
+ for line in f:
+ lns = line.split('=')
+ if lns[0] == "DISTRIB_DESCRIPTION":
+ distro = lns[1].strip('"\n')
+ break
+ finally:
+ f.close()
+ if distro:
+ if distro not in [x.strip() for x in tested_distros.split('\\n')]:
+ bb.warn('Host distribution "%s" has not been validated with this version of the build system; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.' % distro)
+ else:
+ bb.warn('Host distribution could not be determined; you may possibly experience unexpected failures. It is recommended that you use a tested distribution.')
+
+# Checks we should only make if MACHINE is set correctly
+def check_sanity_validmachine(e):
+ from bb import data
+
+ messages = ""
+
+ # Check TUNE_ARCH is set
+ if data.getVar('TUNE_ARCH', e.data, True) == 'INVALID':
+ messages = messages + 'TUNE_ARCH is unset. Please ensure your MACHINE configuration includes a valid tune configuration file which will set this correctly.\n'
+
+ # Check TARGET_ARCH is set correctly
+ if data.getVar('TARGET_ARCH', e.data, False) == '${TUNE_ARCH}':
+ messages = messages + 'TARGET_ARCH is being overwritten, likely by your MACHINE configuration files.\nPlease use a valid tune configuration file which should set this correctly automatically\nand avoid setting this in the machine configuration. See the OE-Core mailing list for more information.\n'
+
+ # Check TARGET_OS is set
+ if data.getVar('TARGET_OS', e.data, True) == 'INVALID':
+ messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
+
+ # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
+ pkgarchs = data.getVar('PACKAGE_ARCHS', e.data, True)
+ tunepkg = data.getVar('TUNE_PKGARCH', e.data, True)
+ tunefound = False
+ seen = {}
+ dups = []
+
+ for pa in pkgarchs.split():
+ if seen.get(pa, 0) == 1:
+ dups.append(pa)
+ else:
+ seen[pa] = 1
+ if pa == tunepkg:
+ tunefound = True
+
+ if len(dups):
+ messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
+
+ if tunefound == False:
+ messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." % tunepkg
+
+ return messages
- bbpath = []
- fn = bb.data.expand(fn, data)
- vbbpath = bb.data.getVar("BBPATH", data)
- if vbbpath:
- bbpath += vbbpath.split(":")
- for p in bbpath:
- currname = os.path.join(bb.data.expand(p, data), fn)
- if os.access(currname, os.R_OK):
- return True
- return False
def check_sanity(e):
- from bb import note, error, data, __version__
- from bb.event import Handled, NotHandled, getName
- try:
- from distutils.version import LooseVersion
- except ImportError:
- def LooseVersion(v): print "WARNING: sanity.bbclass can't compare versions without python-distutils"; return 1
- import os, commands
+ from bb import note, error, data, __version__
+
+ try:
+ from distutils.version import LooseVersion
+ except ImportError:
+ def LooseVersion(v): print "WARNING: sanity.bbclass can't compare versions without python-distutils"; return 1
+ import commands
+
+ # Check the bitbake version meets minimum requirements
+ minversion = data.getVar('BB_MIN_VERSION', e.data , True)
+ if not minversion:
+ # Hack: BB_MIN_VERSION hasn't been parsed yet so return
+ # and wait for the next call
+ print "Foo %s" % minversion
+ return
+
+ if 0 == os.getuid():
+ raise_sanity_error("Do not use Bitbake as root.")
+
+ messages = ""
+
+ # Check the Python version, we now use Python 2.6 features in
+ # various classes
+ import sys
+ if sys.hexversion < 0x020600F0:
+ messages = messages + 'The system requires at least Python 2.6 to run. Please update your Python interpreter.\n'
+
+ if (LooseVersion(__version__) < LooseVersion(minversion)):
+ messages = messages + 'Bitbake version %s is required and version %s was found\n' % (minversion, __version__)
- # Check the bitbake version meets minimum requirements
- minversion = data.getVar('BB_MIN_VERSION', e.data , True)
- if not minversion:
- # Hack: BB_MIN_VERSION hasn't been parsed yet so return
- # and wait for the next call
- print "Foo %s" % minversion
- return
+ # Check that the MACHINE is valid, if it is set
+ if data.getVar('MACHINE', e.data, True):
+ if not check_conf_exists("conf/machine/${MACHINE}.conf", e.data):
+ messages = messages + 'Please set a valid MACHINE in your local.conf or environment\n'
+ else:
+ messages = messages + check_sanity_validmachine(e)
+ else:
+ messages = messages + 'Please set a MACHINE in your local.conf or environment\n'
- if 0 == os.getuid():
- raise_sanity_error("Do not use Bitbake as root.")
+ # Check we are using a valid local.conf
+ current_conf = data.getVar('CONF_VERSION', e.data, True)
+ conf_version = data.getVar('LOCALCONF_VERSION', e.data, True)
- messages = ""
+ if current_conf != conf_version:
+ messages = messages + "Your version of local.conf was generated from an older version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf conf/local.conf.sample\" is a good way to visualise the changes.\n"
- if (LooseVersion(__version__) < LooseVersion(minversion)):
- messages = messages + 'Bitbake version %s is required and version %s was found\n' % (minversion, __version__)
+ # Check bblayers.conf is valid
+ current_lconf = data.getVar('LCONF_VERSION', e.data, True)
+ lconf_version = data.getVar('LAYER_CONF_VERSION', e.data, True)
+ if current_lconf != lconf_version:
+ messages = messages + "Your version of bblayers.conf was generated from an older version of bblayers.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/bblayers.conf conf/bblayers.conf.sample\" is a good way to visualise the changes.\n"
- # Check TARGET_ARCH is set
- if data.getVar('TARGET_ARCH', e.data, True) == 'INVALID':
- messages = messages + 'Please set TARGET_ARCH directly, or choose a MACHINE or DISTRO that does so.\n'
-
- # Check TARGET_OS is set
- if data.getVar('TARGET_OS', e.data, True) == 'INVALID':
- messages = messages + 'Please set TARGET_OS directly, or choose a MACHINE or DISTRO that does so.\n'
+ # If we have a site.conf, check it's valid
+ if check_conf_exists("conf/site.conf", e.data):
+ current_sconf = data.getVar('SCONF_VERSION', e.data, True)
+ sconf_version = data.getVar('SITE_CONF_VERSION', e.data, True)
+ if current_sconf != sconf_version:
+ messages = messages + "Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf conf/site.conf.sample\" is a good way to visualise the changes.\n"
- assume_provided = data.getVar('ASSUME_PROVIDED', e.data , True).split()
- # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
- if "diffstat-native" not in assume_provided:
- messages = messages + 'Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n'
-
- # Check that the MACHINE is valid, if it is set
- if data.getVar('MACHINE', e.data, True):
- if not check_conf_exists("conf/machine/${MACHINE}.conf", e.data):
- messages = messages + 'Please set a valid MACHINE in your local.conf\n'
-
- # Check that the DISTRO is valid
- # need to take into account DISTRO renaming DISTRO
- if not ( check_conf_exists("conf/distro/${DISTRO}.conf", e.data) or check_conf_exists("conf/distro/include/${DISTRO}.inc", e.data) ):
- messages = messages + "DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % data.getVar("DISTRO", e.data, True )
+ assume_provided = data.getVar('ASSUME_PROVIDED', e.data , True).split()
+ # Check user doesn't have ASSUME_PROVIDED = instead of += in local.conf
+ if "diffstat-native" not in assume_provided:
+ messages = messages + 'Please use ASSUME_PROVIDED +=, not ASSUME_PROVIDED = in your local.conf\n'
- missing = ""
+ # Check that DL_DIR is set, exists and is writable. In theory, we should never even hit the check if DL_DIR isn't
+ # set, since so much relies on it being set.
+ dldir = data.getVar('DL_DIR', e.data, True)
+ if not dldir:
+ messages = messages + "DL_DIR is not set. Your environment is misconfigured, check that DL_DIR is set, and if the directory exists, that it is writable.\n"
+ elif os.path.exists(dldir) and not os.access(dldir, os.W_OK):
+ messages = messages + "DL_DIR: %s exists but you do not appear to have write access to it.\n" % dldir
+
+ # Check that the DISTRO is valid, if set
+ # need to take into account DISTRO renaming DISTRO
+ distro = data.getVar('DISTRO', e.data, True)
+ if distro:
+ if not ( check_conf_exists("conf/distro/${DISTRO}.conf", e.data) or check_conf_exists("conf/distro/include/${DISTRO}.inc", e.data) ):
+ messages = messages + "DISTRO '%s' not found. Please set a valid DISTRO in your local.conf\n" % data.getVar("DISTRO", e.data, True )
- if not check_app_exists("${MAKE}", e.data):
- missing = missing + "GNU make,"
+ missing = ""
- if not check_app_exists('${BUILD_PREFIX}gcc', e.data):
- missing = missing + "C Compiler (%sgcc)," % data.getVar("BUILD_PREFIX", e.data, True)
+ if not check_app_exists("${MAKE}", e.data):
+ missing = missing + "GNU make,"
- if not check_app_exists('${BUILD_PREFIX}g++', e.data):
- missing = missing + "C++ Compiler (%sg++)," % data.getVar("BUILD_PREFIX", e.data, True)
+ if not check_app_exists('${BUILD_PREFIX}gcc', e.data):
+ missing = missing + "C Compiler (%sgcc)," % data.getVar("BUILD_PREFIX", e.data, True)
- required_utilities = "patch help2man diffstat texi2html makeinfo cvs svn bzip2 tar gzip gawk"
+ if not check_app_exists('${BUILD_PREFIX}g++', e.data):
+ missing = missing + "C++ Compiler (%sg++)," % data.getVar("BUILD_PREFIX", e.data, True)
- # qemu-native needs gcc 3.x
- if "qemu-native" not in assume_provided and "gcc3-native" in assume_provided:
- gcc_version = commands.getoutput("${BUILD_PREFIX}gcc --version | head -n 1 | cut -f 3 -d ' '")
+ required_utilities = e.data.getVar('SANITY_REQUIRED_UTILITIES', True)
- if not check_gcc3(e.data) and gcc_version[0] != '3':
- messages = messages + "gcc3-native was in ASSUME_PROVIDED but the gcc-3.x binary can't be found in PATH"
- missing = missing + "gcc-3.x (needed for qemu-native),"
+ if "qemu-native" in assume_provided:
+ if not check_app_exists("qemu-arm", e.data):
+ messages = messages + "qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH"
- if "qemu-native" in assume_provided:
- if not check_app_exists("qemu-arm", e.data):
- messages = messages + "qemu-native was in ASSUME_PROVIDED but the QEMU binaries (qemu-arm) can't be found in PATH"
+ paths = data.getVar('PATH', e.data, True).split(":")
+ if "." in paths or "" in paths:
+ messages = messages + "PATH contains '.' or '', which will break the build, please remove this."
- if data.getVar('TARGET_ARCH', e.data, True) == "arm":
- if os.path.exists("/proc/sys/vm/mmap_min_addr"):
- f = file("/proc/sys/vm/mmap_min_addr", "r")
- if (f.read().strip() != "0"):
- messages = messages + "/proc/sys/vm/mmap_min_addr is not 0. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 0 in /etc/sysctl.conf.\n"
- f.close()
+ if data.getVar('TARGET_ARCH', e.data, True) == "arm":
+ # This path is no longer user-readable in modern (very recent) Linux
+ try:
+ if os.path.exists("/proc/sys/vm/mmap_min_addr"):
+ f = open("/proc/sys/vm/mmap_min_addr", "r")
+ try:
+ if (int(f.read().strip()) > 65536):
+ messages = messages + "/proc/sys/vm/mmap_min_addr is not <= 65536. This will cause problems with qemu so please fix the value (as root).\n\nTo fix this in later reboots, set vm.mmap_min_addr = 65536 in /etc/sysctl.conf.\n"
+ finally:
+ f.close()
+ except:
+ pass
- for util in required_utilities.split():
- if not check_app_exists( util, e.data ):
- missing = missing + "%s," % util
+ for util in required_utilities.split():
+ if not check_app_exists( util, e.data ):
+ missing = missing + "%s," % util
- if missing != "":
- missing = missing.rstrip(',')
- messages = messages + "Please install following missing utilities: %s\n" % missing
+ if missing != "":
+ missing = missing.rstrip(',')
+ messages = messages + "Please install following missing utilities: %s\n" % missing
- if os.path.basename(os.readlink('/bin/sh')) == 'dash':
- messages = messages + "Using dash as /bin/sh causes various subtle build problems, please use bash instead.\n"
+ pseudo_msg = check_pseudo_wrapper()
+ if pseudo_msg != "":
+ messages = messages + pseudo_msg + '\n'
- omask = os.umask(022)
- if omask & 0755:
- messages = messages + "Please use a umask which allows a+rx and u+rwx\n"
- os.umask(omask)
+ check_supported_distro(e)
+ toolchain_msg = check_toolchain(e.data)
+ if toolchain_msg != "":
+ messages = messages + toolchain_msg + '\n'
- oes_bb_conf = data.getVar( 'OES_BITBAKE_CONF', e.data, True )
- if not oes_bb_conf:
- messages = messages + 'You do not include OpenEmbeddeds version of conf/bitbake.conf. This means your environment is misconfigured, in particular check BBPATH.\n'
+ # Check if DISPLAY is set if IMAGETEST is set
+ if not data.getVar( 'DISPLAY', e.data, True ) and data.getVar( 'IMAGETEST', e.data, True ) == 'qemu':
+ messages = messages + 'qemuimagetest needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n'
- #
- # Check that TMPDIR hasn't changed location since the last time we were run
- #
- tmpdir = data.getVar('TMPDIR', e.data, True)
- checkfile = os.path.join(tmpdir, "saved_tmpdir")
- if os.path.exists(checkfile):
- f = file(checkfile, "r")
- saved_tmpdir = f.read().strip()
- if (saved_tmpdir != tmpdir):
- messages = messages + "Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir
- else:
- f = file(checkfile, "w")
- f.write(tmpdir)
- f.close()
+ omask = os.umask(022)
+ if omask & 0755:
+ messages = messages + "Please use a umask which allows a+rx and u+rwx\n"
+ os.umask(omask)
- #
- # Check the 'ABI' of TMPDIR
- #
- current_abi = data.getVar('OELAYOUT_ABI', e.data, True)
- abifile = data.getVar('SANITY_ABIFILE', e.data, True)
- if os.path.exists(abifile):
- f = file(abifile, "r")
- abi = f.read().strip()
- if not abi.isdigit():
- f = file(abifile, "w")
- f.write(current_abi)
- elif (abi != current_abi):
- # Code to convert from one ABI to another could go here if possible.
- messages = messages + "Error, TMPDIR has changed ABI (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)
- else:
- f = file(abifile, "w")
- f.write(current_abi)
- f.close()
+ oes_bb_conf = data.getVar( 'OES_BITBAKE_CONF', e.data, True )
+ if not oes_bb_conf:
+ messages = messages + "You do not include OpenEmbedded's version of conf/bitbake.conf. This means your environment is misconfigured; in particular, check BBPATH.\n"
- if messages != "":
- raise_sanity_error(messages)
+ nolibs = data.getVar('NO32LIBS', e.data, True)
+ if not nolibs:
+ lib32path = '/lib'
+ if os.path.exists('/lib64') and ( os.path.islink('/lib64') or os.path.islink('/lib') ):
+ lib32path = '/lib32'
+
+ if os.path.exists('%s/libc.so.6' % lib32path) and not os.path.exists('/usr/include/gnu/stubs-32.h'):
+ messages = messages + "You have a 32-bit libc, but no 32-bit headers. You must install the 32-bit libc headers.\n"
+
+ tmpdir = data.getVar('TMPDIR', e.data, True)
+ sstate_dir = data.getVar('SSTATE_DIR', e.data, True)
+
+ # Check saved sanity info
+ last_sanity_version = 0
+ last_tmpdir = ""
+ last_sstate_dir = ""
+ sanityverfile = 'conf/sanity_info'
+ if os.path.exists(sanityverfile):
+ f = file(sanityverfile, 'r')
+ for line in f:
+ if line.startswith('SANITY_VERSION'):
+ last_sanity_version = int(line.split()[1])
+ if line.startswith('TMPDIR'):
+ last_tmpdir = line.split()[1]
+ if line.startswith('SSTATE_DIR'):
+ last_sstate_dir = line.split()[1]
+
+ sanity_version = int(data.getVar('SANITY_VERSION', e.data, True) or 1)
+ if last_sanity_version < sanity_version:
+ messages = messages + check_sanity_version_change(e.data)
+ messages = messages + check_sanity_tmpdir_change(tmpdir, e.data)
+ messages = messages + check_sanity_sstate_dir_change(sstate_dir, e.data)
+ else:
+ if last_tmpdir != tmpdir:
+ messages = messages + check_sanity_tmpdir_change(tmpdir, e.data)
+ if last_sstate_dir != sstate_dir:
+ messages = messages + check_sanity_sstate_dir_change(sstate_dir, e.data)
+
+ if os.path.exists("conf") and not messages:
+ f = file(sanityverfile, 'w')
+ f.write("SANITY_VERSION %s\n" % sanity_version)
+ f.write("TMPDIR %s\n" % tmpdir)
+ f.write("SSTATE_DIR %s\n" % sstate_dir)
+
+ #
+ # Check that TMPDIR hasn't changed location since the last time we were run
+ #
+ checkfile = os.path.join(tmpdir, "saved_tmpdir")
+ if os.path.exists(checkfile):
+ f = file(checkfile, "r")
+ saved_tmpdir = f.read().strip()
+ if (saved_tmpdir != tmpdir):
+ messages = messages + "Error, TMPDIR has changed location. You need to either move it back to %s or rebuild\n" % saved_tmpdir
+ else:
+ f = file(checkfile, "w")
+ f.write(tmpdir)
+ f.close()
+
+ #
+ # Check the 'ABI' of TMPDIR
+ #
+ current_abi = data.getVar('OELAYOUT_ABI', e.data, True)
+ abifile = data.getVar('SANITY_ABIFILE', e.data, True)
+ if os.path.exists(abifile):
+ f = file(abifile, "r")
+ abi = f.read().strip()
+ if not abi.isdigit():
+ f = file(abifile, "w")
+ f.write(current_abi)
+ elif abi == "2" and current_abi == "3":
+ bb.note("Converting staging from layout version 2 to layout version 3")
+ os.system(e.data.expand("mv ${TMPDIR}/staging ${TMPDIR}/sysroots"))
+ os.system(e.data.expand("ln -s sysroots ${TMPDIR}/staging"))
+ os.system(e.data.expand("cd ${TMPDIR}/stamps; for i in */*do_populate_staging; do new=`echo $i | sed -e 's/do_populate_staging/do_populate_sysroot/'`; mv $i $new; done"))
+ f = file(abifile, "w")
+ f.write(current_abi)
+ elif abi == "3" and current_abi == "4":
+ bb.note("Converting staging layout from version 3 to layout version 4")
+ if os.path.exists(e.data.expand("${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}")):
+ os.system(e.data.expand("mv ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS} ${STAGING_BINDIR_CROSS}"))
+ os.system(e.data.expand("ln -s ${STAGING_BINDIR_CROSS} ${STAGING_DIR_NATIVE}${bindir_native}/${MULTIMACH_HOST_SYS}"))
+
+ f = file(abifile, "w")
+ f.write(current_abi)
+ elif abi == "4":
+ messages = messages + "Staging layout has changed. The cross directory has been deprecated and cross packages are now built under the native sysroot.\nThis requires a rebuild.\n"
+ elif abi == "5" and current_abi == "6":
+ bb.note("Converting staging layout from version 5 to layout version 6")
+ os.system(e.data.expand("mv ${TMPDIR}/pstagelogs ${SSTATE_MANIFESTS}"))
+ f = file(abifile, "w")
+ f.write(current_abi)
+ elif abi == "7" and current_abi == "8":
+ messages = messages + "Your configuration is using stamp files including the sstate hash but your build directory was built with stamp files that do not include this.\nTo continue, either rebuild or switch back to the OEBasic signature handler with BB_SIGNATURE_HANDLER = 'OEBasic'.\n"
+ elif (abi != current_abi):
+ # Code to convert from one ABI to another could go here if possible.
+ messages = messages + "Error, TMPDIR has changed its layout version number (%s to %s) and you need to either rebuild, revert or adjust it at your own risk.\n" % (abi, current_abi)
+ else:
+ f = file(abifile, "w")
+ f.write(current_abi)
+ f.close()
+
+ oeroot = data.getVar('COREBASE', e.data)
+ if oeroot.find('+') != -1:
+ messages = messages + "Error, you have an invalid character (+) in your COREBASE directory path. Please move the installation to a directory which doesn't include a +.\n"
+ elif oeroot.find(' ') != -1:
+ messages = messages + "Error, you have a space in your COREBASE directory path. Please move the installation to a directory which doesn't include a space.\n"
+
+ if messages != "":
+ raise_sanity_error(messages)
addhandler check_sanity_eventhandler
python check_sanity_eventhandler() {
- from bb import note, error, data, __version__
- from bb.event import getName
-
- if getName(e) == "ConfigParsed":
+ if bb.event.getName(e) == "ConfigParsed" and e.data.getVar("BB_WORKERCONTEXT", True) != "1" and e.data.getVar("DISABLE_SANITY_CHECKS", True) != "1":
+ check_sanity(e)
+ elif bb.event.getName(e) == "SanityCheck":
check_sanity(e)
+ bb.event.fire(bb.event.SanityCheckPassed(), e.data)
- return NotHandled
+ return
}
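
For readers unfamiliar with distutils' LooseVersion, here is a minimal standalone sketch of the version gate used in check_sanity above; the version strings are illustrative. LooseVersion compares dotted versions component-wise, so "1.13.10" correctly sorts after "1.13.3" where a plain string comparison would not.

    from distutils.version import LooseVersion

    minversion = "1.13.3"    # stands in for BB_MIN_VERSION
    current = "1.13.10"      # stands in for bb.__version__

    # Component-wise comparison: [1, 13, 10] >= [1, 13, 3], so no message.
    if LooseVersion(current) < LooseVersion(minversion):
        print("Bitbake version %s is required and version %s was found"
              % (minversion, current))
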
diff --git a/meta/classes/scons.bbclass b/meta/classes/scons.bbclass
index 534b3bd4c..a07a366df 100644
--- a/meta/classes/scons.bbclass
+++ b/meta/classes/scons.bbclass
@@ -1,13 +1,15 @@
DEPENDS += "python-scons-native"
+EXTRA_OESCONS ?= ""
+
scons_do_compile() {
- ${STAGING_BINDIR_NATIVE}/scons PREFIX=${prefix} prefix=${prefix} || \
- oefatal "scons build execution failed."
+ ${STAGING_BINDIR_NATIVE}/scons PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
+ bbfatal "scons build execution failed."
}
scons_do_install() {
- ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install || \
- oefatal "scons install execution failed."
+ ${STAGING_BINDIR_NATIVE}/scons PREFIX=${D}${prefix} prefix=${D}${prefix} install ${EXTRA_OESCONS} || \
+ bbfatal "scons install execution failed."
}
EXPORT_FUNCTIONS do_compile do_install
diff --git a/meta/classes/sdk.bbclass b/meta/classes/sdk.bbclass
deleted file mode 100644
index 20670e885..000000000
--- a/meta/classes/sdk.bbclass
+++ /dev/null
@@ -1,106 +0,0 @@
-# SDK packages are built either explicitly by the user,
-# or indirectly via dependency. No need to be in 'world'.
-EXCLUDE_FROM_WORLD = "1"
-
-# Save MULTIMACH_ARCH
-OLD_MULTIMACH_ARCH := "${MULTIMACH_ARCH}"
-# Save PACKAGE_ARCH
-OLD_PACKAGE_ARCH := ${PACKAGE_ARCH}
-PACKAGE_ARCH = "${BUILD_ARCH}-${OLD_PACKAGE_ARCH}-sdk"
-# Also save BASE_PACKAGE_ARCH since HOST_ARCH can influence it
-OLD_BASE_PACKAGE_ARCH := "${BASE_PACKAGE_ARCH}"
-BASE_PACKAGE_ARCH = "${OLD_BASE_PACKAGE_ARCH}"
-
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-sdk"
-STAGING_DIR_TARGET = "${STAGING_DIR}/${OLD_MULTIMACH_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
-
-HOST_ARCH = "${BUILD_ARCH}"
-HOST_VENDOR = "${BUILD_VENDOR}"
-HOST_OS = "${BUILD_OS}"
-HOST_PREFIX = "${BUILD_PREFIX}"
-HOST_CC_ARCH = "${BUILD_CC_ARCH}"
-#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
-
-CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
-CFLAGS = "${BUILDSDK_CFLAGS}"
-CXXFLAGS = "${BUILDSDK_CFLAGS}"
-LDFLAGS = "${BUILDSDK_LDFLAGS}"
-
-# Path prefixes
-prefix = "${SDK_PREFIX}"
-exec_prefix = "${prefix}"
-base_prefix = "${prefix}"
-
-# Base paths
-export base_bindir = "${prefix}/bin"
-export base_sbindir = "${prefix}/bin"
-export base_libdir = "${prefix}/lib"
-
-# Architecture independent paths
-export datadir = "${prefix}/share"
-export sysconfdir = "${prefix}/etc"
-export sharedstatedir = "${datadir}/com"
-export localstatedir = "${prefix}/var"
-export infodir = "${datadir}/info"
-export mandir = "${datadir}/man"
-export docdir = "${datadir}/doc"
-export servicedir = "${prefix}/srv"
-
-# Architecture dependent paths
-export bindir = "${prefix}/bin"
-export sbindir = "${prefix}/bin"
-export libexecdir = "${prefix}/libexec"
-export libdir = "${prefix}/lib"
-export includedir = "${prefix}/include"
-export oldincludedir = "${prefix}/include"
-
-FILES_${PN} = "${prefix}"
-FILES_${PN}-dbg += "${prefix}/.debug \
- ${prefix}/bin/.debug \
- "
-
-export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
-export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
-
-python () {
- barch = bb.data.getVar('BUILD_ARCH', d, True)
- archs = bb.data.getVar('PACKAGE_ARCHS', d, True).split()
- sdkarchs = []
- for arch in archs:
- sdkarchs.append(barch + '-' + arch + '-sdk')
- bb.data.setVar('PACKAGE_ARCHS', " ".join(sdkarchs), d)
-}
-
-python __anonymous () {
- pn = bb.data.getVar("PN", d, True)
- depends = bb.data.getVar("DEPENDS", d, True)
- deps = bb.utils.explode_deps(depends)
- if "sdk" in (bb.data.getVar('BBCLASSEXTEND', d, True) or ""):
- autoextend = True
- else:
- autoextend = False
- for dep in deps:
- if dep.endswith("-native") or dep.endswith("-cross"):
- continue
- if not dep.endswith("-sdk"):
- if autoextend:
- depends = depends.replace(dep, dep + "-sdk")
- elif pn == 'gcc-cross-sdk':
- continue
- else:
- bb.note("%s has depends %s which doesn't end in -sdk?" % (pn, dep))
- bb.data.setVar("DEPENDS", depends, d)
- provides = bb.data.getVar("PROVIDES", d, True)
- for prov in provides.split():
- if prov.find(pn) != -1:
- continue
- if not prov.endswith("-sdk"):
- if autoextend:
- provides = provides.replace(prov, prov + "-sdk")
- #else:
- # bb.note("%s has rouge PROVIDES of %s which doesn't end in -sdk?" % (pn, prov))
- bb.data.setVar("PROVIDES", provides, d)
-
-}
-
-
diff --git a/meta/classes/sdl.bbclass b/meta/classes/sdl.bbclass
index d478d97f1..cc31288f6 100644
--- a/meta/classes/sdl.bbclass
+++ b/meta/classes/sdl.bbclass
@@ -3,42 +3,4 @@
#
DEPENDS += "virtual/libsdl libsdl-mixer libsdl-image"
-
-APPDESKTOP ?= "${PN}.desktop"
-APPNAME ?= "${PN}"
-APPIMAGE ?= "${PN}.png"
-
-sdl_do_sdl_install() {
- install -d ${D}${palmtopdir}/bin
- install -d ${D}${palmtopdir}/pics
- install -d ${D}${palmtopdir}/apps/Games
- ln -sf ${bindir}/${APPNAME} ${D}${palmtopdir}/bin/${APPNAME}
- install -m 0644 ${APPIMAGE} ${D}${palmtopdir}/pics/${PN}.png
-
- if [ -e "${APPDESKTOP}" ]
- then
- echo ${APPDESKTOP} present, installing to palmtopdir...
- install -m 0644 ${APPDESKTOP} ${D}${palmtopdir}/apps/Games/${PN}.desktop
- else
- echo ${APPDESKTOP} not present, creating one on-the-fly...
- cat >${D}${palmtopdir}/apps/Games/${PN}.desktop <<EOF
-[Desktop Entry]
-Note=Auto Generated... this may be not what you want
-Comment=${DESCRIPTION}
-Exec=${APPNAME}
-Icon=${PN}.png
-Type=Application
-Name=${PN}
-EOF
- fi
-}
-
-EXPORT_FUNCTIONS do_sdl_install
-addtask sdl_install after do_compile before do_populate_staging
-
SECTION = "x11/games"
-SECTION_${PN}-opie = "opie/games"
-
-PACKAGES += "${PN}-opie"
-RDEPENDS_${PN}-opie += "${PN}"
-FILES_${PN}-opie = "${palmtopdir}"
diff --git a/meta/classes/setuptools.bbclass b/meta/classes/setuptools.bbclass
new file mode 100644
index 000000000..ced9509df
--- /dev/null
+++ b/meta/classes/setuptools.bbclass
@@ -0,0 +1,8 @@
+inherit distutils
+
+DEPENDS += "python-setuptools-native"
+
+DISTUTILS_INSTALL_ARGS = "--root=${D} \
+ --single-version-externally-managed \
+ --prefix=${prefix} \
+ --install-data=${datadir}"
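
As a hedged illustration of what these arguments amount to when the inherited distutils code eventually runs setup.py: everything is rooted under the image directory rather than the build host, and --single-version-externally-managed stops setuptools from wrapping the install in an .egg directory. The paths below are placeholders, not the class's actual expansions.

    import subprocess

    D = "/path/to/image"       # stands in for ${D}
    prefix = "/usr"            # stands in for ${prefix}
    datadir = "/usr/share"     # stands in for ${datadir}

    # Roughly what the staged install boils down to.
    subprocess.check_call([
        "python", "setup.py", "install",
        "--root=%s" % D,
        "--single-version-externally-managed",
        "--prefix=%s" % prefix,
        "--install-data=%s" % datadir,
    ])
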
diff --git a/meta/classes/singlemachine.bbclass b/meta/classes/singlemachine.bbclass
deleted file mode 100644
index c685ce397..000000000
--- a/meta/classes/singlemachine.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Emulates the old mode of OE operation where only one machine can be targetted.
-#
-
-MULTIMACH_TARGET_SYS = "${TARGET_SYS}"
-MULTIMACH_HOST_SYS = "${HOST_SYS}"
-
-STAMP = "${TMPDIR}/stamps/${PF}"
-WORKDIR = "${TMPDIR}/work/${PF}"
-STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}"
-STAGING_DIR_TARGET = "${STAGING_DIR}/${TARGET_SYS}"
-PKGDATA_DIR = "${STAGING_DIR}/pkgdata"
-STAGING_KERNEL_DIR = "${STAGING_DIR_HOST}/kernel"
-
-
diff --git a/meta/classes/sip.bbclass b/meta/classes/sip.bbclass
index a258fda62..3a00e4dbd 100644
--- a/meta/classes/sip.bbclass
+++ b/meta/classes/sip.bbclass
@@ -1,6 +1,8 @@
# Build Class for Sip based Python Bindings
# (C) Michael 'Mickey' Lauer <mickey@Vanille.de>
#
+STAGING_SIPDIR ?= "${STAGING_DATADIR_NATIVE}/sip"
+
DEPENDS =+ "sip-native"
RDEPENDS += "python-sip"
@@ -17,7 +19,7 @@ sip_do_generate() {
if [ -z "$MODULES" ]; then
die "SIP_MODULES not set and no modules found in $PWD"
else
- oenote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
+ bbnote "using modules '${SIP_MODULES}' and tags '${EXTRA_SIPTAGS}'"
fi
if [ -z "${EXTRA_SIPTAGS}" ]; then
@@ -28,7 +30,7 @@ sip_do_generate() {
if [ ! -z "${SIP_FEATURES}" ]; then
FEATURES="-z ${SIP_FEATURES}"
- oenote "sip feature file: ${SIP_FEATURES}"
+ bbnote "sip feature file: ${SIP_FEATURES}"
fi
for module in $MODULES
diff --git a/meta/classes/siteconfig.bbclass b/meta/classes/siteconfig.bbclass
new file mode 100644
index 000000000..ccbe5b99c
--- /dev/null
+++ b/meta/classes/siteconfig.bbclass
@@ -0,0 +1,32 @@
+python siteconfig_do_siteconfig () {
+ shared_state = sstate_state_fromvars(d)
+ if shared_state['name'] != 'populate-sysroot':
+ return
+ if not os.path.isdir(os.path.join(d.getVar('FILE_DIRNAME', True), 'site_config')):
+ bb.debug(1, "No site_config directory, skipping do_siteconfig")
+ return
+ bb.build.exec_func('do_siteconfig_gencache', d)
+ sstate_clean(shared_state, d)
+ sstate_install(shared_state, d)
+}
+
+EXTRASITECONFIG ?= ""
+
+siteconfig_do_siteconfig_gencache () {
+ mkdir -p ${WORKDIR}/site_config_${MACHINE}
+ gen-site-config ${FILE_DIRNAME}/site_config \
+ >${WORKDIR}/site_config_${MACHINE}/configure.ac
+ cd ${WORKDIR}/site_config_${MACHINE}
+ autoconf
+ CONFIG_SITE="" ${EXTRASITECONFIG} ./configure ${CONFIGUREOPTS} --cache-file ${PN}_cache
+ sed -n -e "/ac_cv_c_bigendian/p" -e "/ac_cv_sizeof_/p" \
+ -e "/ac_cv_type_/p" -e "/ac_cv_header_/p" -e "/ac_cv_func_/p" \
+ < ${PN}_cache > ${PN}_config
+ mkdir -p ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+ cp ${PN}_config ${SYSROOT_DESTDIR}${datadir}/${TARGET_SYS}_config_site.d
+
+}
+
+do_populate_sysroot[sstate-interceptfuncs] += "do_siteconfig "
+
+EXPORT_FUNCTIONS do_siteconfig do_siteconfig_gencache
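
The sed invocation in do_siteconfig_gencache is the heart of the scheme: from the configure cache it keeps only the result classes that are safe to share across builds (endianness, sizes, types, headers, functions). A rough Python equivalent of that filter, with illustrative file names:

    import re

    # Matches the same ac_cv_* result classes the sed expression prints.
    keep = re.compile(r"ac_cv_(c_bigendian|sizeof_|type_|header_|func_)")

    src = open("pn_cache")        # stands in for ${PN}_cache
    dst = open("pn_config", "w")  # stands in for ${PN}_config
    for line in src:
        if keep.search(line):
            dst.write(line)
    src.close()
    dst.close()
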
diff --git a/meta/classes/siteinfo.bbclass b/meta/classes/siteinfo.bbclass
index 431b81ce2..8c256ceff 100644
--- a/meta/classes/siteinfo.bbclass
+++ b/meta/classes/siteinfo.bbclass
@@ -15,123 +15,130 @@
# It is an error for the target not to exist.
# If 'what' doesn't exist then an empty value is returned
#
-def get_siteinfo_list(d):
- import bb
+def siteinfo_data(d):
+ archinfo = {
+ "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
+ "arm": "endian-little bit-32 arm-common",
+ "armeb": "endian-big bit-32 arm-common",
+ "avr32": "endian-big bit-32 avr32-common",
+ "bfin": "endian-little bit-32 bfin-common",
+ "i386": "endian-little bit-32 ix86-common",
+ "i486": "endian-little bit-32 ix86-common",
+ "i586": "endian-little bit-32 ix86-common",
+ "i686": "endian-little bit-32 ix86-common",
+ "ia64": "endian-little bit-64",
+ "microblaze": "endian-big bit-32 microblaze-common",
+ "microblazeel": "endian-little bit-32 microblaze-common",
+ "mips": "endian-big bit-32 mips-common",
+ "mips64": "endian-big bit-64 mips64-common",
+ "mips64el": "endian-little bit-64 mips64-common",
+ "mipsel": "endian-little bit-32 mips-common",
+ "powerpc": "endian-big bit-32 powerpc-common",
+ "nios2": "endian-little bit-32 nios2-common",
+ "powerpc64": "endian-big bit-64 powerpc-common",
+ "ppc": "endian-big bit-32 powerpc-common",
+ "ppc64": "endian-big bit-64 powerpc-common",
+ "sh3": "endian-little bit-32 sh-common",
+ "sh4": "endian-little bit-32 sh-common",
+ "sparc": "endian-big bit-32",
+ "viac3": "endian-little bit-32 ix86-common",
+ "x86_64": "endian-little", # bitinfo specified in targetinfo
+ }
+ osinfo = {
+ "darwin": "common-darwin",
+ "darwin9": "common-darwin",
+ "linux": "common-linux common-glibc",
+ "linux-gnu": "common-linux common-glibc",
+ "linux-gnux32": "common-linux common-glibc",
+ "linux-gnueabi": "common-linux common-glibc",
+ "linux-gnuspe": "common-linux common-glibc",
+ "linux-uclibc": "common-linux common-uclibc",
+ "linux-uclibceabi": "common-linux common-uclibc",
+ "linux-uclibcspe": "common-linux common-uclibc",
+ "uclinux-uclibc": "common-uclibc",
+ "cygwin": "common-cygwin",
+ "mingw32": "common-mingw",
+ }
+ targetinfo = {
+ "arm-linux-gnueabi": "arm-linux",
+ "arm-linux-uclibceabi": "arm-linux-uclibc",
+ "armeb-linux-gnueabi": "armeb-linux",
+ "armeb-linux-uclibceabi": "armeb-linux-uclibc",
+ "powerpc-linux": "powerpc32-linux",
+ "powerpc-linux-uclibc": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-gnuspe": "powerpc-linux powerpc32-linux",
+ "powerpc-linux-uclibcspe": "powerpc-linux powerpc32-linux powerpc-linux-uclibc",
+ "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
+ "powerpc64-linux": "powerpc-linux",
+ "x86_64-cygwin": "bit-64",
+ "x86_64-darwin": "bit-64",
+ "x86_64-darwin9": "bit-64",
+ "x86_64-linux": "bit-64",
+ "x86_64-linux-uclibc": "bit-64",
+ "x86_64-linux-gnu": "bit-64 x86_64-linux",
+ "x86_64-linux-gnux32": "bit-32 ix86-common x32-linux",
+ "x86_64-mingw32": "bit-64",
+ }
- target = bb.data.getVar('HOST_ARCH', d, 1) + "-" + bb.data.getVar('HOST_OS', d, 1)
+ hostarch = d.getVar("HOST_ARCH", True)
+ hostos = d.getVar("HOST_OS", True)
+ target = "%s-%s" % (hostarch, hostos)
- targetinfo = {\
- "armeb-linux": "endian-big bit-32 common-glibc arm-common",\
- "armeb-linux-gnueabi": "endian-big bit-32 common-glibc arm-common armeb-linux",\
- "armeb-linux-uclibc": "endian-big bit-32 common-uclibc arm-common",\
- "armeb-linux-uclibcgnueabi": "endian-big bit-32 common-uclibc arm-common armeb-linux-uclibc",\
- "arm-darwin": "endian-little bit-32 common-darwin",\
- "arm-darwin8": "endian-little bit-32 common-darwin",\
- "arm-linux": "endian-little bit-32 common-glibc arm-common",\
- "arm-linux-gnueabi": "endian-little bit-32 common-glibc arm-common arm-linux",\
- "arm-linux-uclibc": "endian-little bit-32 common-uclibc arm-common",\
- "arm-linux-uclibcgnueabi": "endian-little bit-32 common-uclibc arm-common arm-linux-uclibc",\
- "avr32-linux": "endian-big bit-32 common-glibc avr32-common",\
- "avr32-linux-uclibc": "endian-big bit-32 common-uclibc avr32-common",\
- "bfin-uclinux-uclibc": "endian-little bit-32 common-uclibc bfin-common",\
- "i386-linux": "endian-little bit-32 common-glibc ix86-common",\
- "i486-linux": "endian-little bit-32 common-glibc ix86-common",\
- "i586-linux": "endian-little bit-32 common-glibc ix86-common",\
- "i686-linux": "endian-little bit-32 common-glibc ix86-common",\
- "i386-linux-uclibc": "endian-little bit-32 common-uclibc ix86-common",\
- "i486-linux-uclibc": "endian-little bit-32 common-uclibc ix86-common",\
- "i586-linux-uclibc": "endian-little bit-32 common-uclibc ix86-common",\
- "i686-linux-uclibc": "endian-little bit-32 common-uclibc ix86-common",\
- "mipsel-linux": "endian-little bit-32 common-glibc",\
- "mipsel-linux-uclibc": "endian-little bit-32 common-uclibc",\
- "mips-linux": "endian-big bit-32 common-glibc",\
- "mips-linux-uclibc": "endian-big bit-32 common-uclibc",\
- "powerpc-darwin": "endian-big bit-32 common-darwin",\
- "ppc-linux": "endian-big bit-32 common-glibc powerpc-common",\
- "powerpc-linux": "endian-big bit-32 common-glibc powerpc-common",\
- "powerpc-linux-uclibc": "endian-big bit-32 common-uclibc powerpc-common",\
- "sh3-linux": "endian-little bit-32 common-glibc sh-common",\
- "sh4-linux": "endian-little bit-32 common-glibc sh-common",\
- "sh4-linux-uclibc": "endian-little bit-32 common-uclibc sh-common",\
- "sparc-linux": "endian-big bit-32 common-glibc",\
- "x86_64-linux": "endian-little bit-64 common-glibc",\
- "x86_64-linux-uclibc": "endian-little bit-64 common-uclibc"}
- if target in targetinfo:
- info = targetinfo[target].split()
- info.append(target)
- info.append("common")
- return info
- else:
- bb.error("Information not available for target '%s'" % target)
+ sitedata = []
+ if hostarch in archinfo:
+ sitedata.extend(archinfo[hostarch].split())
+ if hostos in osinfo:
+ sitedata.extend(osinfo[hostos].split())
+ if target in targetinfo:
+ sitedata.extend(targetinfo[target].split())
+ sitedata.append(target)
+ sitedata.append("common")
+ bb.debug(1, "SITE files %s" % sitedata);
+ return sitedata
-#
-# Define which site files to use. We check for several site files and
-# use each one that is found, based on the list returned by get_siteinfo_list()
-#
-# Search for the files in the following directories:
-# 1) ${BBPATH}/site (in reverse) - app specific, then site wide
-# 2) ${FILE_DIRNAME}/site-${PV} - app version specific
-#
-def siteinfo_get_files(d):
- import bb, os
+python () {
+ sitedata = set(siteinfo_data(d))
+ if "endian-little" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "le")
+ elif "endian-big" in sitedata:
+ d.setVar("SITEINFO_ENDIANNESS", "be")
+ else:
+ bb.error("Unable to determine endianness for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
- sitefiles = ""
+ if "bit-32" in sitedata:
+ d.setVar("SITEINFO_BITS", "32")
+ elif "bit-64" in sitedata:
+ d.setVar("SITEINFO_BITS", "64")
+ else:
+ bb.error("Unable to determine bit size for architecture '%s'" %
+ d.getVar("HOST_ARCH", True))
+ bb.fatal("Please add your architecture to siteinfo.bbclass")
+}
- # Determine which site files to look for
- sites = get_siteinfo_list(d)
- sites.append("common");
+def siteinfo_get_files(d, no_cache = False):
+ sitedata = siteinfo_data(d)
+ sitefiles = ""
+ for path in d.getVar("BBPATH", True).split(":"):
+ for element in sitedata:
+ filename = os.path.join(path, "site", element)
+ if os.path.exists(filename):
+ sitefiles += filename + " "
- # Check along bbpath for site files and append in reverse order so
- # the application specific sites files are last and system site
- # files first.
- path_bb = bb.data.getVar('BBPATH', d, 1)
- for p in (path_bb or "").split(':'):
- tmp = ""
- for i in sites:
- fname = os.path.join(p, 'site', i)
- if os.path.exists(fname):
- tmp += fname + " "
- sitefiles = tmp + sitefiles;
+ if no_cache: return sitefiles
- # Now check for the applications version specific site files
- path_pkgv = os.path.join(bb.data.getVar('FILE_DIRNAME', d, 1), "site-" + bb.data.getVar('PV', d, 1))
- for i in sites:
- fname = os.path.join(path_pkgv, i)
- if os.path.exists(fname):
- sitefiles += fname + " "
+ # Now check for siteconfig cache files
+ path_siteconfig = d.getVar('SITECONFIG_SYSROOTCACHE', True)
+ if os.path.isdir(path_siteconfig):
+ for i in os.listdir(path_siteconfig):
+ filename = os.path.join(path_siteconfig, i)
+ sitefiles += filename + " "
- bb.debug(1, "SITE files " + sitefiles);
- return sitefiles
-
-#
-# Export CONFIG_SITE to the enviroment. The autotools will make use
-# of this to determine where to load in variables from. This is a
-# space seperate list of shell scripts processed in the order listed.
-#
-export CONFIG_SITE = "${@siteinfo_get_files(d)}"
-
-
-def siteinfo_get_endianess(d):
- info = get_siteinfo_list(d)
- if 'endian-little' in info:
- return "le"
- elif 'endian-big' in info:
- return "be"
- bb.error("Site info could not determine endianess for target")
-
-def siteinfo_get_bits(d):
- info = get_siteinfo_list(d)
- if 'bit-32' in info:
- return "32"
- elif 'bit-64' in info:
- return "64"
- bb.error("Site info could not determine bit size for target")
+ return sitefiles
#
# Make some information available via variables
#
-SITEINFO_ENDIANESS = "${@siteinfo_get_endianess(d)}"
-SITEINFO_BITS = "${@siteinfo_get_bits(d)}"
-
-
+SITECONFIG_SYSROOTCACHE = "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
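
To make the new table-driven lookup concrete, here is a standalone sketch with a toy subset of the tables; arm/linux-gnueabi is just an example HOST_ARCH/HOST_OS pair.

    archinfo = {"arm": "endian-little bit-32 arm-common"}
    osinfo = {"linux-gnueabi": "common-linux common-glibc"}
    targetinfo = {"arm-linux-gnueabi": "arm-linux"}

    hostarch, hostos = "arm", "linux-gnueabi"
    target = "%s-%s" % (hostarch, hostos)

    # Accumulate arch, OS and target entries, then the fixed suffixes.
    sitedata = []
    for table, key in ((archinfo, hostarch), (osinfo, hostos),
                       (targetinfo, target)):
        if key in table:
            sitedata.extend(table[key].split())
    sitedata += [target, "common"]
    print(sitedata)
    # ['endian-little', 'bit-32', 'arm-common', 'common-linux',
    #  'common-glibc', 'arm-linux', 'arm-linux-gnueabi', 'common']
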
diff --git a/meta/classes/sourcepkg.bbclass b/meta/classes/sourcepkg.bbclass
deleted file mode 100644
index 390d3684d..000000000
--- a/meta/classes/sourcepkg.bbclass
+++ /dev/null
@@ -1,111 +0,0 @@
-DEPLOY_DIR_SRC ?= "${DEPLOY_DIR}/source"
-EXCLUDE_FROM ?= ".pc autom4te.cache"
-
-# used as part of a path. make sure it's set
-DISTRO ?= "openembedded"
-
-def get_src_tree(d):
- import bb
- import os, os.path
-
- workdir = bb.data.getVar('WORKDIR', d, 1)
- if not workdir:
- bb.error("WORKDIR not defined, unable to find source tree.")
- return
-
- s = bb.data.getVar('S', d, 0)
- if not s:
- bb.error("S not defined, unable to find source tree.")
- return
-
- s_tree_raw = s.split('/')[1]
- s_tree = bb.data.expand(s_tree_raw, d)
-
- src_tree_path = os.path.join(workdir, s_tree)
- try:
- os.listdir(src_tree_path)
- except OSError:
- bb.fatal("Expected to find source tree in '%s' which doesn't exist." % src_tree_path)
- bb.debug("Assuming source tree is '%s'" % src_tree_path)
-
- return s_tree
-
-sourcepkg_do_create_orig_tgz(){
-
- mkdir -p ${DEPLOY_DIR_SRC}
- cd ${WORKDIR}
- for i in ${EXCLUDE_FROM}; do
- echo $i >> temp/exclude-from-file
- done
-
- src_tree=${@get_src_tree(d)}
-
- echo $src_tree
- oenote "Creating .orig.tar.gz in ${DEPLOY_DIR_SRC}/${P}.orig.tar.gz"
- tar cvzf ${DEPLOY_DIR_SRC}/${P}.orig.tar.gz --exclude-from temp/exclude-from-file $src_tree
- cp -pPR $src_tree $src_tree.orig
-}
-
-sourcepkg_do_archive_bb() {
-
- src_tree=${@get_src_tree(d)}
- dest=${WORKDIR}/$src_tree/${DISTRO}
- mkdir -p $dest
-
- cp ${FILE} $dest
-}
-
-python sourcepkg_do_dumpdata() {
- import os
- import os.path
-
- workdir = bb.data.getVar('WORKDIR', d, 1)
- distro = bb.data.getVar('DISTRO', d, 1)
- s_tree = get_src_tree(d)
- openembeddeddir = os.path.join(workdir, s_tree, distro)
- dumpfile = os.path.join(openembeddeddir, bb.data.expand("${P}-${PR}.showdata.dump",d))
-
- try:
- os.mkdir(openembeddeddir)
- except OSError:
- # dir exists
- pass
-
- bb.note("Dumping metadata into '%s'" % dumpfile)
- f = open(dumpfile, "w")
- # emit variables and shell functions
- bb.data.emit_env(f, d, True)
- # emit the metadata which isnt valid shell
- for e in d.keys():
- if bb.data.getVarFlag(e, 'python', d):
- f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, 1)))
- f.close()
-}
-
-sourcepkg_do_create_diff_gz(){
-
- cd ${WORKDIR}
- for i in ${EXCLUDE_FROM}; do
- echo $i >> temp/exclude-from-file
- done
-
-
- src_tree=${@get_src_tree(d)}
-
- for i in `find . -maxdepth 1 -type f`; do
- mkdir -p $src_tree/${DISTRO}/files
- cp $i $src_tree/${DISTRO}/files
- done
-
- oenote "Creating .diff.gz in ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz"
- LC_ALL=C TZ=UTC0 diff --exclude-from=temp/exclude-from-file -Naur $src_tree.orig $src_tree | gzip -c > ${DEPLOY_DIR_SRC}/${P}-${PR}.diff.gz
- rm -rf $src_tree.orig
-}
-
-EXPORT_FUNCTIONS do_create_orig_tgz do_archive_bb do_dumpdata do_create_diff_gz
-
-addtask create_orig_tgz after do_unpack before do_patch
-addtask archive_bb after do_patch before do_dumpdata
-addtask dumpdata after archive_bb before do_create_diff_gz
-addtask create_diff_gz after do_dump_data before do_configure
-
diff --git a/meta/classes/src_distribute.bbclass b/meta/classes/src_distribute.bbclass
deleted file mode 100644
index f20410d0b..000000000
--- a/meta/classes/src_distribute.bbclass
+++ /dev/null
@@ -1,27 +0,0 @@
-SRC_DISTRIBUTECOMMAND[func] = "1"
-python do_distribute_sources () {
- l = bb.data.createCopy(d)
- bb.data.update_data(l)
- licenses = (bb.data.getVar('LICENSE', d, 1) or "unknown").split()
-
- sources_dir = bb.data.getVar('SRC_DISTRIBUTEDIR', d, 1)
- import re
- for license in licenses:
- for entry in license.split("|"):
- for s in (bb.data.getVar('A', d, 1) or "").split():
- s = re.sub(';.*$', '', s)
- cmd = bb.data.getVar('SRC_DISTRIBUTECOMMAND', d, 1)
- if not cmd:
- raise bb.build.FuncFailed("Unable to distribute sources, SRC_DISTRIBUTECOMMAND not defined")
- bb.data.setVar('SRC', s, d)
- bb.data.setVar('SRC_DISTRIBUTEDIR', "%s/%s" % (sources_dir, entry), d)
- bb.build.exec_func('SRC_DISTRIBUTECOMMAND', d)
-}
-
-addtask distribute_sources before do_build after do_fetch
-
-addtask distsrcall after do_distribute_sources
-do_distall[recrdeptask] = "do_distribute_sources"
-base_do_distsrcall() {
- :
-}
diff --git a/meta/classes/src_distribute_local.bbclass b/meta/classes/src_distribute_local.bbclass
deleted file mode 100644
index 5f0cef5be..000000000
--- a/meta/classes/src_distribute_local.bbclass
+++ /dev/null
@@ -1,31 +0,0 @@
-inherit src_distribute
-
-# SRC_DIST_LOCAL possible values:
-# copy copies the files from ${A} to the distributedir
-# symlink symlinks the files from ${A} to the distributedir
-# move+symlink moves the files into distributedir, and symlinks them back
-SRC_DIST_LOCAL ?= "move+symlink"
-SRC_DISTRIBUTEDIR ?= "${DEPLOY_DIR}/sources"
-SRC_DISTRIBUTECOMMAND () {
- s="${SRC}"
- if [ ! -L "$s" ] && (echo "$s"|grep "^${DL_DIR}"); then
- :
- else
- exit 0;
- fi
- mkdir -p ${SRC_DISTRIBUTEDIR}
- case "${SRC_DIST_LOCAL}" in
- copy)
- test -e $s.md5 && cp -f $s.md5 ${SRC_DISTRIBUTEDIR}/
- cp -f $s ${SRC_DISTRIBUTEDIR}/
- ;;
- symlink)
- test -e $s.md5 && ln -sf $s.md5 ${SRC_DISTRIBUTEDIR}/
- ln -sf $s ${SRC_DISTRIBUTEDIR}/
- ;;
- move+symlink)
- mv $s ${SRC_DISTRIBUTEDIR}/
- ln -sf ${SRC_DISTRIBUTEDIR}/`basename $s` $s
- ;;
- esac
-}
diff --git a/meta/classes/srec.bbclass b/meta/classes/srec.bbclass
deleted file mode 100644
index a869a4f1f..000000000
--- a/meta/classes/srec.bbclass
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# Creates .srec files from images.
-#
-# Useful for loading with Yamon.
-
-# Define SREC_VMAADDR in your machine.conf.
-
-SREC_CMD = "${TARGET_PREFIX}objcopy -O srec -I binary --adjust-vma ${SREC_VMAADDR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.${type} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.${type}.srec"
-
-# Do not build srec files for these types of images:
-SREC_SKIP = "tar"
-
-do_srec[nostamp] = "1"
-
-do_srec () {
- if [ ${SREC_VMAADDR} = "" ] ; then
- oefatal Cannot do_srec without SREC_VMAADDR defined.
- fi
- for type in ${IMAGE_FSTYPES}; do
- for skiptype in ${SREC_SKIP}; do
- if [ $type = $skiptype ] ; then continue 2 ; fi
- done
- ${SREC_CMD}
- done
- return 0
-}
-
-addtask srec after do_rootfs before do_build
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
new file mode 100644
index 000000000..ae019379b
--- /dev/null
+++ b/meta/classes/sstate.bbclass
@@ -0,0 +1,558 @@
+SSTATE_VERSION = "2"
+
+SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
+SSTATE_MANFILEBASE = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-"
+SSTATE_MANFILEPREFIX = "${SSTATE_MANFILEBASE}${PN}"
+
+
+SSTATE_PKGARCH = "${PACKAGE_ARCH}"
+SSTATE_PKGSPEC = "sstate-${PN}-${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}-${PV}-${PR}-${SSTATE_PKGARCH}-${SSTATE_VERSION}-"
+SSTATE_PKGNAME = "${SSTATE_PKGSPEC}${BB_TASKHASH}"
+SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
+
+SSTATE_SCAN_FILES ?= "*.la *-config *_config"
+SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
+
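
The inline expansion in SSTATE_SCAN_CMD above is easy to misread: the embedded join turns the pattern list into alternating -name/-o arguments for find. A standalone sketch of what it expands to:

    patterns = "*.la *-config *_config".split()   # SSTATE_SCAN_FILES default
    expr = '" -o -name "'.join(patterns)
    print('find ${SSTATE_BUILDDIR} \\( -name "%s" \\) -type f' % expr)
    # -> find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config"
    #        -o -name "*_config" \) -type f
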
+BB_HASHFILENAME = "${SSTATE_PKGNAME}"
+
+SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
+
+SSTATEPREINSTFUNCS ?= ""
+SSTATEPOSTINSTFUNCS ?= ""
+
+python () {
+ if bb.data.inherits_class('native', d):
+ d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH'))
+ elif bb.data.inherits_class('cross', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TUNE_PKGARCH}"))
+ d.setVar('SSTATE_MANMACH', d.expand("${BUILD_ARCH}_${MACHINE}"))
+ elif bb.data.inherits_class('crosssdk', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${PACKAGE_ARCH}"))
+ elif bb.data.inherits_class('nativesdk', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}"))
+ elif bb.data.inherits_class('cross-canadian', d):
+ d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
+ else:
+ d.setVar('SSTATE_MANMACH', d.expand("${MACHINE}"))
+
+ # These classes encode staging paths into their scripts data, so they can
+ # only be reused if we manipulate those paths
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
+ scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
+ d.setVar('SSTATE_SCAN_CMD', scan_cmd)
+
+ unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
+ d.setVar('SSTATETASKS', " ".join(unique_tasks))
+ namemap = []
+ for task in unique_tasks:
+ namemap.append(d.getVarFlag(task, 'sstate-name'))
+ d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
+ d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
+ d.setVar('SSTATETASKNAMES', " ".join(namemap))
+}
+
+def sstate_init(name, task, d):
+ ss = {}
+ ss['task'] = task
+ ss['name'] = name
+ ss['dirs'] = []
+ ss['plaindirs'] = []
+ ss['lockfiles'] = []
+ ss['lockfiles-shared'] = []
+ return ss
+
+def sstate_state_fromvars(d, task = None):
+ if task is None:
+ task = d.getVar('BB_CURRENTTASK', True)
+ if not task:
+ bb.fatal("sstate code running without task context?!")
+ task = task.replace("_setscene", "")
+
+ name = d.getVarFlag("do_" + task, 'sstate-name', True)
+ inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
+ outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
+ plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
+ lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
+ lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
+ interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
+ if not name or len(inputs) != len(outputs):
+ bb.fatal("sstate variables not setup correctly?!")
+
+ ss = sstate_init(name, task, d)
+ for i in range(len(inputs)):
+ sstate_add(ss, inputs[i], outputs[i], d)
+ ss['lockfiles'] = lockfiles
+ ss['lockfiles-shared'] = lockfilesshared
+ ss['plaindirs'] = plaindirs
+ ss['interceptfuncs'] = interceptfuncs
+ return ss
+
+def sstate_add(ss, source, dest, d):
+ srcbase = os.path.basename(source)
+ ss['dirs'].append([srcbase, source, dest])
+ return ss
+
+def sstate_install(ss, d):
+ import oe.path
+
+ sharedfiles = []
+ shareddirs = []
+ bb.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+ manifest = d.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['name'])
+
+ if os.access(manifest, os.R_OK):
+ bb.fatal("Package already staged (%s)?!" % manifest)
+
+ locks = []
+ for lock in ss['lockfiles-shared']:
+ locks.append(bb.utils.lockfile(lock, True))
+ for lock in ss['lockfiles']:
+ locks.append(bb.utils.lockfile(lock))
+
+ for state in ss['dirs']:
+ oe.path.copytree(state[1], state[2])
+ for walkroot, dirs, files in os.walk(state[1]):
+ bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
+ for file in files:
+ srcpath = os.path.join(walkroot, file)
+ dstpath = srcpath.replace(state[1], state[2])
+ #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
+ sharedfiles.append(dstpath)
+ for dir in dirs:
+ srcdir = os.path.join(walkroot, dir)
+ dstdir = srcdir.replace(state[1], state[2])
+ #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
+ if not dstdir.endswith("/"):
+ dstdir = dstdir + "/"
+ shareddirs.append(dstdir)
+ f = open(manifest, "w")
+ for file in sharedfiles:
+ f.write(file + "\n")
+ # We want to ensure that directories appear at the end of the manifest
+ # so that when we test to see if they should be deleted any contents
+ # added by the task will have been removed first.
+ dirs = sorted(shareddirs, key=len)
+ # Must remove children first, which will have a longer path than the parent
+ for di in reversed(dirs):
+ f.write(di + "\n")
+ f.close()
+
+ for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
+ bb.build.exec_func(postinst, d)
+
+ for lock in locks:
+ bb.utils.unlockfile(lock)
+
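
The ordering rule the manifest comment describes is worth seeing in isolation: sorting ascending by path length puts parents before children, and writing the list in reverse puts the longest paths (children) first in the manifest, so sequential removal at clean time empties a child directory before its parent is tested. A minimal demonstration:

    shareddirs = ["/sysroot/usr/", "/sysroot/usr/include/sub/",
                  "/sysroot/usr/include/"]

    # Ascending length puts parents first; reversing writes children first.
    for entry in reversed(sorted(shareddirs, key=len)):
        print(entry)
    # /sysroot/usr/include/sub/
    # /sysroot/usr/include/
    # /sysroot/usr/
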
+def sstate_installpkg(ss, d):
+ import oe.path
+
+ def prepdir(dir):
+ # remove dir if it exists, ensure any parent directories do exist
+ if os.path.exists(dir):
+ oe.path.remove(dir)
+ bb.mkdirhier(dir)
+ oe.path.remove(dir)
+
+ sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['name'])
+ sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['name'] + ".tgz"
+
+ if not os.path.exists(sstatepkg):
+ pstaging_fetch(sstatepkg, d)
+
+ if not os.path.isfile(sstatepkg):
+ bb.note("Staging package %s does not exist" % sstatepkg)
+ return False
+
+ sstate_clean(ss, d)
+
+ d.setVar('SSTATE_INSTDIR', sstateinst)
+ d.setVar('SSTATE_PKG', sstatepkg)
+
+ for preinst in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split():
+ bb.build.exec_func(preinst, d)
+
+ bb.build.exec_func('sstate_unpack_package', d)
+
+ # Fixup hardcoded paths
+ #
+ # Note: The logic below must match the reverse logic in
+ # sstate_hardcode_path(d)
+
+ fixmefn = sstateinst + "fixmepath"
+ if os.path.isfile(fixmefn):
+ staging = d.getVar('STAGING_DIR', True)
+ staging_target = d.getVar('STAGING_DIR_TARGET', True)
+ staging_host = d.getVar('STAGING_DIR_HOST', True)
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
+ elif bb.data.inherits_class('cross', d):
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
+ else:
+ sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
+
+ # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
+ sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
+
+ print "Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd)
+ os.system(sstate_hardcode_cmd)
+
+ # Need to remove this or we'd copy it into the target directory and may
+ # conflict with another writer
+ os.remove(fixmefn)
+
+ for state in ss['dirs']:
+ prepdir(state[1])
+ os.rename(sstateinst + state[0], state[1])
+ sstate_install(ss, d)
+
+ for plain in ss['plaindirs']:
+ workdir = d.getVar('WORKDIR', True)
+ src = sstateinst + "/" + plain.replace(workdir, '')
+ dest = plain
+ bb.mkdirhier(src)
+ prepdir(dest)
+ os.rename(src, dest)
+
+ return True
+
+def sstate_clean_cachefile(ss, d):
+ import oe.path
+
+ sstatepkgdir = d.getVar('SSTATE_DIR', True)
+ sstatepkgfile = sstatepkgdir + '/' + d.getVar('SSTATE_PKGSPEC', True) + "*_" + ss['name'] + ".tgz*"
+ bb.note("Removing %s" % sstatepkgfile)
+ oe.path.remove(sstatepkgfile)
+
+def sstate_clean_cachefiles(d):
+ for task in (d.getVar('SSTATETASKS', True) or "").split():
+ ss = sstate_state_fromvars(d, task[3:])
+ sstate_clean_cachefile(ss, d)
+
+def sstate_clean_manifest(manifest, d):
+ import oe.path
+
+ mfile = open(manifest)
+ entries = mfile.readlines()
+ mfile.close()
+
+ for entry in entries:
+ entry = entry.strip()
+ bb.debug(2, "Removing manifest: %s" % entry)
+ # We can race against another package populating directories as we're removing them
+ # so we ignore errors here.
+ try:
+ if entry.endswith("/"):
+ if os.path.islink(entry[:-1]):
+ os.remove(entry[:-1])
+ elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
+ os.rmdir(entry[:-1])
+ else:
+ oe.path.remove(entry)
+ except OSError:
+ pass
+
+ oe.path.remove(manifest)
+
+def sstate_clean(ss, d):
+ import oe.path
+
+ manifest = d.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['name'])
+
+ if os.path.exists(manifest):
+ locks = []
+ for lock in ss['lockfiles-shared']:
+ locks.append(bb.utils.lockfile(lock))
+ for lock in ss['lockfiles']:
+ locks.append(bb.utils.lockfile(lock))
+
+ sstate_clean_manifest(manifest, d)
+
+ for lock in locks:
+ bb.utils.unlockfile(lock)
+
+ stfile = d.getVar("STAMP", True) + ".do_" + ss['task']
+ extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
+ oe.path.remove(stfile)
+ oe.path.remove(stfile + "_setscene")
+ if extrainf:
+ oe.path.remove(stfile + ".*" + extrainf)
+ oe.path.remove(stfile + "_setscene" + ".*" + extrainf)
+ else:
+ oe.path.remove(stfile + ".*")
+ oe.path.remove(stfile + "_setscene" + ".*")
+
+CLEANFUNCS += "sstate_cleanall"
+
+python sstate_cleanall() {
+ import fnmatch
+
+ bb.note("Removing shared state for package %s" % d.getVar('PN', True))
+
+ manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
+ manifest_prefix = d.getVar("SSTATE_MANFILEPREFIX", True)
+ manifest_pattern = os.path.basename(manifest_prefix) + ".*"
+
+ if not os.path.exists(manifest_dir):
+ return
+
+ for manifest in (os.listdir(manifest_dir)):
+ if fnmatch.fnmatch(manifest, manifest_pattern):
+ name = manifest.replace(manifest_pattern[:-1], "")
+ namemap = d.getVar('SSTATETASKNAMES', True).split()
+ tasks = d.getVar('SSTATETASKS', True).split()
+ if name not in namemap:
+ continue
+ taskname = tasks[namemap.index(name)]
+ shared_state = sstate_state_fromvars(d, taskname[3:])
+ sstate_clean(shared_state, d)
+}
+
+def sstate_hardcode_path(d):
+ # Need to remove hardcoded paths and fix these when we install the
+ # staging packages.
+ #
+ # Note: the logic in this function needs to match the reverse logic
+ # in sstate_installpkg(ss, d)
+
+ staging = d.getVar('STAGING_DIR', True)
+ staging_target = d.getVar('STAGING_DIR_TARGET', True)
+ staging_host = d.getVar('STAGING_DIR_HOST', True)
+ sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)
+
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
+ sstate_grep_cmd = "grep -l -e '%s'" % (staging)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
+ elif bb.data.inherits_class('cross', d):
+ sstate_grep_cmd = "grep -l -e '(%s|%s)'" % (staging_target, staging)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
+ else:
+ sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
+ sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)
+
+ fixmefn = sstate_builddir + "fixmepath"
+
+ sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
+ sstate_filelist_cmd = "tee %s" % (fixmefn)
+
+ # fixmepath file needs relative paths, drop sstate_builddir prefix
+ sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
+
+ # Limit the fixpaths and sed operations based on the initial grep search
+ # This has the side effect of making sure the vfs cache is hot
+ sstate_hardcode_cmd = "%s | xargs %s | %s | xargs --no-run-if-empty %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, sstate_sed_cmd)
+
+ print "Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd)
+ os.system(sstate_hardcode_cmd)
+
+ # If the fixmefn is empty, remove it..
+ if os.stat(fixmefn).st_size == 0:
+ os.remove(fixmefn)
+ else:
+ print "Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd)
+ os.system(sstate_filelist_relative_cmd)
+
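
The FIXMESTAGINGDIR placeholder scheme is symmetric: sstate_hardcode_path swaps the packaging build's staging path out before the archive is created, and sstate_installpkg swaps the consuming build's own path back in. A toy round trip with illustrative paths:

    packer_staging = "/builder/tmp/sysroots"      # STAGING_DIR at package time
    installer_staging = "/laptop/tmp/sysroots"    # STAGING_DIR at install time

    line = "libdir='/builder/tmp/sysroots/lib'"
    # At package time: neutralise the absolute path.
    packed = line.replace(packer_staging, "FIXMESTAGINGDIR")
    # At install time: rewrite it for the consuming build directory.
    unpacked = packed.replace("FIXMESTAGINGDIR", installer_staging)
    print(unpacked)   # libdir='/laptop/tmp/sysroots/lib'
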
+def sstate_package(ss, d):
+ import oe.path
+
+ def make_relative_symlink(path, outputpath, d):
+ # Replace out absolute TMPDIR paths in symlinks with relative ones
+ if not os.path.islink(path):
+ return
+ link = os.readlink(path)
+ if not os.path.isabs(link):
+ return
+ if not link.startswith(tmpdir):
+ return
+
+ depth = link.rpartition(tmpdir)[2].count('/')
+ base = link.partition(tmpdir)[2].strip()
+ while depth > 1:
+ base = "../" + base
+ depth -= 1
+
+ bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
+ os.remove(path)
+ os.symlink(base, path)
+
+ tmpdir = d.getVar('TMPDIR', True)
+
+ sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['name'])
+ sstatepkg = d.getVar('SSTATE_PKG', True) + '_'+ ss['name'] + ".tgz"
+ bb.mkdirhier(sstatebuild)
+ bb.mkdirhier(os.path.dirname(sstatepkg))
+ for state in ss['dirs']:
+ srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
+ for walkroot, dirs, files in os.walk(state[1]):
+ for file in files:
+ srcpath = os.path.join(walkroot, file)
+ dstpath = srcpath.replace(state[1], sstatebuild + state[0])
+ make_relative_symlink(srcpath, dstpath, d)
+ for dir in dirs:
+ srcpath = os.path.join(walkroot, dir)
+ dstpath = srcpath.replace(state[1], sstatebuild + state[0])
+ make_relative_symlink(srcpath, dstpath, d)
+ bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
+ oe.path.copytree(state[1], sstatebuild + state[0])
+
+ workdir = d.getVar('WORKDIR', True)
+ for plain in ss['plaindirs']:
+ pdir = plain.replace(workdir, sstatebuild)
+ bb.mkdirhier(plain)
+ bb.mkdirhier(pdir)
+ oe.path.copytree(plain, pdir)
+
+ d.setVar('SSTATE_BUILDDIR', sstatebuild)
+ d.setVar('SSTATE_PKG', sstatepkg)
+ sstate_hardcode_path(d)
+ bb.build.exec_func('sstate_create_package', d)
+
+ bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)
+
+ return
+
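
make_relative_symlink exists so packaged sysroots stay relocatable: an absolute link into TMPDIR would break as soon as the package is unpacked into a different build directory. A simplified sketch of the same idea, using os.path.relpath rather than the manual depth loop above:

    import os

    path = "/build/tmp/sysroots/x86/usr/bin/tool"     # the symlink itself
    link = "/build/tmp/sysroots/x86/usr/lib/helper"   # its absolute target

    # Walk up from the link's directory, then back down to the target.
    rel = os.path.relpath(link, os.path.dirname(path))
    print(rel)   # ../lib/helper
    # os.symlink(rel, path) would then record a relocatable link.
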
+def pstaging_fetch(sstatepkg, d):
+ import bb.fetch2
+
+ # Only try and fetch if the user has configured a mirror
+ mirrors = d.getVar('SSTATE_MIRRORS', True)
+ if not mirrors:
+ return
+
+ # Copy the data object and override DL_DIR and SRC_URI
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ dldir = localdata.expand("${SSTATE_DIR}")
+ srcuri = "file://" + os.path.basename(sstatepkg)
+
+ bb.mkdirhier(dldir)
+
+ localdata.setVar('DL_DIR', dldir)
+ localdata.setVar('PREMIRRORS', mirrors)
+ localdata.setVar('SRC_URI', srcuri)
+
+ # Try a fetch from the sstate mirror, if it fails just return and
+ # we will build the package
+ try:
+ fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+ fetcher.download()
+
+ # Need to optimise this, if using file:// urls, the fetcher just changes the local path
+ # For now work around by symlinking
+ localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
+ if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
+ os.symlink(localpath, sstatepkg)
+
+ except bb.fetch2.BBFetchException:
+ pass
+
+def sstate_setscene(d):
+ shared_state = sstate_state_fromvars(d)
+ accelerate = sstate_installpkg(shared_state, d)
+ if not accelerate:
+ raise bb.build.FuncFailed("No suitable staging package found")
+
+python sstate_task_prefunc () {
+ shared_state = sstate_state_fromvars(d)
+ sstate_clean(shared_state, d)
+}
+
+python sstate_task_postfunc () {
+ shared_state = sstate_state_fromvars(d)
+ sstate_install(shared_state, d)
+ for intercept in shared_state['interceptfuncs']:
+ bb.build.exec_func(intercept, d)
+ sstate_package(shared_state, d)
+}
+
+
+#
+# Shell function to generate a sstate package from a directory
+# set as SSTATE_BUILDDIR
+#
+sstate_create_package () {
+ cd ${SSTATE_BUILDDIR}
+ TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
+ # Need to handle empty directories
+ if [ "$(ls -A)" ]; then
+ tar -czf $TFILE *
+ else
+ tar -cz --file=$TFILE --files-from=/dev/null
+ fi
+ chmod 0664 $TFILE
+ mv $TFILE ${SSTATE_PKG}
+
+ cd ${WORKDIR}
+ rm -rf ${SSTATE_BUILDDIR}
+}
+
+#
+# Shell function to decompress and prepare a package for installation
+#
+sstate_unpack_package () {
+ mkdir -p ${SSTATE_INSTDIR}
+ cd ${SSTATE_INSTDIR}
+ tar -xvzf ${SSTATE_PKG}
+}
+
+BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
+
+def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d):
+
+ ret = []
+ # This needs to go away, FIXME
+ mapping = {
+ "do_populate_sysroot" : "populate-sysroot",
+ "do_populate_lic" : "populate-lic",
+ "do_package_write_ipk" : "deploy-ipk",
+ "do_package_write_deb" : "deploy-deb",
+ "do_package_write_rpm" : "deploy-rpm",
+ "do_package" : "package",
+ "do_deploy" : "deploy",
+ }
+
+ for task in range(len(sq_fn)):
+ sstatefile = d.expand("${SSTATE_DIR}/" + sq_hashfn[task] + "_" + mapping[sq_task[task]] + ".tgz")
+ sstatefile = sstatefile.replace("${BB_TASKHASH}", sq_hash[task])
+ if os.path.exists(sstatefile):
+ bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
+ ret.append(task)
+ continue
+ else:
+ bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
+
+ mirrors = d.getVar("SSTATE_MIRRORS", True)
+ if mirrors:
+ # Copy the data object and override DL_DIR and SRC_URI
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ dldir = localdata.expand("${SSTATE_DIR}")
+ localdata.setVar('DL_DIR', dldir)
+ localdata.setVar('PREMIRRORS', mirrors)
+
+ bb.debug(2, "SState using premirror of: %s" % mirrors)
+
+ for task in range(len(sq_fn)):
+ if task in ret:
+ continue
+
+ sstatefile = d.expand("${SSTATE_DIR}/" + sq_hashfn[task] + "_" + mapping[sq_task[task]] + ".tgz")
+ sstatefile = sstatefile.replace("${BB_TASKHASH}", sq_hash[task])
+
+ srcuri = "file://" + os.path.basename(sstatefile)
+ localdata.setVar('SRC_URI', srcuri)
+ bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
+
+ try:
+ fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
+ fetcher.checkstatus()
+ bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
+ ret.append(task)
+            except:
+                bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
+
+ return ret
+
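+# For illustration, for a do_populate_sysroot task whose hash function name
+# (sq_hashfn) expands to "foo-${BB_TASKHASH}" (hypothetical), the file probed
+# above is ${SSTATE_DIR}/foo-<taskhash>_populate-sysroot.tgz.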
diff --git a/meta/classes/staging.bbclass b/meta/classes/staging.bbclass
new file mode 100644
index 000000000..a98f51deb
--- /dev/null
+++ b/meta/classes/staging.bbclass
@@ -0,0 +1,121 @@
+
+packagedstaging_fastpath () {
+ :
+}
+
+sysroot_stage_dir() {
+ src="$1"
+ dest="$2"
+ # if the src doesn't exist don't do anything
+ if [ ! -d "$src" ]; then
+ return
+ fi
+
+	# We only want to stage the contents of $src if it's non-empty, so first rmdir $src;
+	# if it still exists afterwards (rmdir fails on a non-empty dir) we can copy its contents
+	rmdir "$src" 2> /dev/null || true
+	# However, we always want to stage $src itself, even if it's empty
+ mkdir -p "$dest"
+ if [ -d "$src" ]; then
+ tar -cf - -C "$src" -ps . | tar -xf - -C "$dest"
+ fi
+}
+
+sysroot_stage_libdir() {
+ src="$1"
+ dest="$2"
+
+ sysroot_stage_dir $src $dest
+}
+
+sysroot_stage_dirs() {
+ from="$1"
+ to="$2"
+
+ sysroot_stage_dir $from${includedir} $to${includedir}
+ if [ "${BUILD_SYS}" = "${HOST_SYS}" ]; then
+ sysroot_stage_dir $from${bindir} $to${bindir}
+ sysroot_stage_dir $from${sbindir} $to${sbindir}
+ sysroot_stage_dir $from${base_bindir} $to${base_bindir}
+ sysroot_stage_dir $from${base_sbindir} $to${base_sbindir}
+ sysroot_stage_dir $from${libexecdir} $to${libexecdir}
+ sysroot_stage_dir $from${sysconfdir} $to${sysconfdir}
+ sysroot_stage_dir $from${localstatedir} $to${localstatedir}
+ fi
+	if [ -d $from${libdir} ]
+	then
+		sysroot_stage_libdir $from${libdir} $to${libdir}
+	fi
+ if [ -d $from${base_libdir} ]
+ then
+ sysroot_stage_libdir $from${base_libdir} $to${base_libdir}
+ fi
+ sysroot_stage_dir $from${datadir} $to${datadir}
+}
+
+sysroot_stage_all() {
+ sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
+}
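+
+# A recipe needing to stage something extra can append to this function, e.g.
+# (paths illustrative):
+#
+# sysroot_stage_all_append () {
+#	sysroot_stage_dir ${D}${datadir}/foo ${SYSROOT_DESTDIR}${datadir}/foo
+# }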
+
+do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[umask] = "022"
+
+addtask populate_sysroot after do_install
+
+SYSROOT_PREPROCESS_FUNCS ?= ""
+SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
+SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
+
+# We clean out any existing sstate from the sysroot if we rerun configure
+python sysroot_cleansstate () {
+ ss = sstate_state_fromvars(d, "populate_sysroot")
+ sstate_clean(ss, d)
+}
+do_configure[prefuncs] += "sysroot_cleansstate"
+
+
+BB_SETSCENE_VERIFY_FUNCTION = "sysroot_checkhashes"
+
+def sysroot_checkhashes(covered, tasknames, fnids, fns, d):
+ problems = set()
+ configurefnids = set()
+ for task in xrange(len(tasknames)):
+ if tasknames[task] == "do_configure" and task not in covered:
+ configurefnids.add(fnids[task])
+ for task in covered:
+ if tasknames[task] == "do_populate_sysroot" and fnids[task] in configurefnids:
+ problems.add(task)
+ return problems
+
+python do_populate_sysroot () {
+    #
+    # Legacy recipes with a do_stage function are no longer supported; the
+    # anonymous python below catches those and raises an error.
+    #
+    # Set up a destdir, copy the results from do_install and run the
+    # staging preprocess functions against that.
+    #
+
+ bb.build.exec_func("sysroot_stage_all", d)
+ for f in (d.getVar('SYSROOT_PREPROCESS_FUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+}
+
+SSTATETASKS += "do_populate_sysroot"
+do_populate_sysroot[sstate-name] = "populate-sysroot"
+do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
+do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_HOST}/"
+do_populate_sysroot[stamp-extra-info] = "${MACHINE}"
+
+python do_populate_sysroot_setscene () {
+ sstate_setscene(d)
+}
+addtask do_populate_sysroot_setscene
+
+python () {
+ if d.getVar('do_stage', True) is not None:
+ bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
+}
+
+
diff --git a/meta/classes/syslinux.bbclass b/meta/classes/syslinux.bbclass
index fb7597470..700ea5391 100644
--- a/meta/classes/syslinux.bbclass
+++ b/meta/classes/syslinux.bbclass
@@ -2,18 +2,67 @@
# Copyright (C) 2004-2006, Advanced Micro Devices, Inc. All Rights Reserved
# Released under the MIT license (see packages/COPYING)
-# This creates a configuration file suitable for use with syslinux.
+# Provide syslinux-specific functions for building bootable images.
+
+# External variables
+# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+# ${AUTO_SYSLINUXMENU} - set this to 1 to enable creating an automatic menu
+# ${LABELS} - a list of targets for the automatic config
+# ${APPEND} - an override list of append strings for each label
+# ${SYSLINUX_OPTS} - additional options to add to the syslinux file, ';' delimited
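+#
+# For example, an image recipe might set (values illustrative):
+#
+# LABELS = "boot install"
+# APPEND = "console=ttyS0,115200"
+# SYSLINUX_OPTS = "serial 0 115200;TIMEOUT 300"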
+
+do_bootimg[depends] += "syslinux:do_populate_sysroot \
+ syslinux-native:do_populate_sysroot"
+
+SYSLINUXCFG = "${S}/syslinux.cfg"
+SYSLINUXMENU = "${S}/menu"
+
+ISOLINUXDIR = "/isolinux"
+SYSLINUXDIR = "/"
+ISO_BOOTIMG = "isolinux/isolinux.bin"
+ISO_BOOTCAT = "isolinux/boot.cat"
+MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
+APPEND_prepend = " ${SYSLINUX_ROOT} "
+
+syslinux_populate() {
+ DEST=$1
+ BOOTDIR=$2
+ CFGNAME=$3
+
+ install -d ${DEST}${BOOTDIR}
+
+ # Install the config files
+ install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME}
+ if [ -f ${SYSLINUXMENU} ]; then
+ install -m 0644 ${SYSLINUXMENU} ${DEST}${BOOTDIR}
+ fi
+}
+
+syslinux_iso_populate() {
+ syslinux_populate ${ISODIR} ${ISOLINUXDIR} isolinux.cfg
+ install -m 0644 ${STAGING_LIBDIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
+}
+
+syslinux_hddimg_populate() {
+ syslinux_populate ${HDDDIR} ${SYSLINUXDIR} syslinux.cfg
+ install -m 0444 ${STAGING_LIBDIR}/syslinux/ldlinux.sys ${HDDDIR}${SYSLINUXDIR}/ldlinux.sys
+}
+
+syslinux_hddimg_install() {
+ syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+}
python build_syslinux_menu () {
import copy
import sys
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR is not defined")
return
- labels = bb.data.getVar('LABELS', d, 1)
+ labels = d.getVar('LABELS', True)
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -22,14 +71,12 @@ python build_syslinux_menu () {
bb.debug(1, "No labels, nothing to do")
return
- cfile = bb.data.getVar('SYSLINUXMENU', d, 1)
+ cfile = d.getVar('SYSLINUXMENU', True)
if not cfile:
raise bb.build.FuncFailed('Unable to read SYSLINUXMENU')
- bb.mkdirhier(os.path.dirname(cfile))
-
try:
- cfgfile = file(cfile, 'w')
+ cfgfile = file(cfile, 'w')
except OSError:
raise bb.build.funcFailed('Unable to open %s' % (cfile))
@@ -45,15 +92,15 @@ python build_syslinux_menu () {
from copy import deepcopy
localdata = deepcopy(d)
- overrides = bb.data.getVar('OVERRIDES', localdata)
+ overrides = localdata.getVar('OVERRIDES')
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
- overrides = bb.data.expand(overrides, localdata)
+ overrides = localdata.expand(overrides)
- bb.data.setVar('OVERRIDES', label + ':' + overrides, localdata)
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
bb.data.update_data(localdata)
- usage = bb.data.getVar('USAGE', localdata, 1)
+ usage = localdata.getVar('USAGE', True)
cfgfile.write(' \x0F\x30\x3E%16s\x0F\x30\x37: ' % (label))
cfgfile.write('%s\n' % (usage))
@@ -67,12 +114,12 @@ python build_syslinux_cfg () {
import copy
import sys
- workdir = bb.data.getVar('WORKDIR', d, 1)
+ workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
- labels = bb.data.getVar('LABELS', d, 1)
+ labels = d.getVar('LABELS', True)
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
@@ -81,70 +128,68 @@ python build_syslinux_cfg () {
bb.debug(1, "No labels, nothing to do")
return
- cfile = bb.data.getVar('SYSLINUXCFG', d, 1)
+ cfile = d.getVar('SYSLINUXCFG', True)
if not cfile:
raise bb.build.FuncFailed('Unable to read SYSLINUXCFG')
- bb.mkdirhier(os.path.dirname(cfile))
-
try:
- cfgfile = file(cfile, 'w')
+ cfgfile = file(cfile, 'w')
except OSError:
raise bb.build.funcFailed('Unable to open %s' % (cfile))
- # FIXME - the timeout should be settable
- # And maybe the default too
- # Definately the prompt
-
cfgfile.write('# Automatically created by OE\n')
- opts = bb.data.getVar('SYSLINUX_OPTS', d, 1)
+ opts = d.getVar('SYSLINUX_OPTS', True)
if opts:
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
-
+
cfgfile.write('ALLOWOPTIONS 1\n');
cfgfile.write('DEFAULT %s\n' % (labels.split()[0]))
- timeout = bb.data.getVar('SYSLINUX_TIMEOUT', d, 1)
+ timeout = d.getVar('SYSLINUX_TIMEOUT', True)
if timeout:
cfgfile.write('TIMEOUT %s\n' % timeout)
else:
cfgfile.write('TIMEOUT 50\n')
- cfgfile.write('PROMPT 1\n')
+ prompt = d.getVar('SYSLINUX_PROMPT', True)
+ if prompt:
+ cfgfile.write('PROMPT %s\n' % prompt)
+ else:
+ cfgfile.write('PROMPT 1\n')
- menu = bb.data.getVar('AUTO_SYSLINUXMENU', d, 1)
+ menu = d.getVar('AUTO_SYSLINUXMENU', True)
# This is ugly. My bad.
if menu:
bb.build.exec_func('build_syslinux_menu', d)
- mfile = bb.data.getVar('SYSLINUXMENU', d, 1)
+ mfile = d.getVar('SYSLINUXMENU', True)
cfgfile.write('DISPLAY %s\n' % (mfile.split('/')[-1]) )
for label in labels.split():
localdata = bb.data.createCopy(d)
- overrides = bb.data.getVar('OVERRIDES', localdata, True)
+ overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
- bb.data.setVar('OVERRIDES', label + ':' + overrides, localdata)
+ localdata.setVar('OVERRIDES', label + ':' + overrides)
bb.data.update_data(localdata)
- cfgfile.write('LABEL %s\nKERNEL vmlinuz\n' % (label))
+ cfgfile.write('LABEL %s\nKERNEL /vmlinuz\n' % (label))
- append = bb.data.getVar('APPEND', localdata, 1)
- initrd = bb.data.getVar('INITRD', localdata, 1)
+ append = localdata.getVar('APPEND', True)
+ initrd = localdata.getVar('INITRD', True)
if append:
cfgfile.write('APPEND ')
if initrd:
- cfgfile.write('initrd=initrd ')
+ cfgfile.write('initrd=/initrd ')
cfgfile.write('LABEL=%s '% (label))
diff --git a/meta/classes/task.bbclass b/meta/classes/task.bbclass
index 4edd70482..22c2fd374 100644
--- a/meta/classes/task.bbclass
+++ b/meta/classes/task.bbclass
@@ -17,11 +17,11 @@ PACKAGE_ARCH = "all"
# to the list. Their dependencies (RRECOMMENDS) are handled as usual
# by package_depchains in a following step.
python () {
- packages = bb.data.getVar('PACKAGES', d, 1).split()
+ packages = d.getVar('PACKAGES', True).split()
genpackages = []
for pkg in packages:
for postfix in ['-dbg', '-dev']:
genpackages.append(pkg+postfix)
- bb.data.setVar('PACKAGES', ' '.join(packages+genpackages), d)
+ d.setVar('PACKAGES', ' '.join(packages+genpackages))
}
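+
+# With the above, a recipe that sets PACKAGES = "task-x11" (for example) ends
+# up with PACKAGES = "task-x11 task-x11-dbg task-x11-dev" before packaging.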
diff --git a/meta/classes/terminal.bbclass b/meta/classes/terminal.bbclass
new file mode 100644
index 000000000..3cfc84b44
--- /dev/null
+++ b/meta/classes/terminal.bbclass
@@ -0,0 +1,41 @@
+OE_TERMINAL ?= 'auto'
+OE_TERMINAL[type] = 'choice'
+OE_TERMINAL[choices] = 'auto none \
+ ${@" ".join(o.name \
+ for o in oe.terminal.prioritized())}'
+
+OE_TERMINAL_EXPORTS = 'XAUTHORITY SHELL DBUS_SESSION_BUS_ADDRESS DISPLAY EXTRA_OEMAKE SCREENDIR'
+OE_TERMINAL_EXPORTS[type] = 'list'
+
+XAUTHORITY ?= "${HOME}/.Xauthority"
+SHELL ?= "bash"
+
+
+def oe_terminal(command, title, d):
+ import oe.data
+ import oe.terminal
+
+ for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
+ value = d.getVar(export, True)
+ if value is not None:
+ os.environ[export] = str(value)
+
+ terminal = oe.data.typed_value('OE_TERMINAL', d).lower()
+ if terminal == 'none':
+ bb.fatal('Devshell usage disabled with OE_TERMINAL')
+ elif terminal != 'auto':
+ try:
+ oe.terminal.spawn(terminal, command, title)
+ return
+ except oe.terminal.UnsupportedTerminal:
+ bb.warn('Unsupported terminal "%s", defaulting to "auto"' %
+ terminal)
+ except oe.terminal.ExecutionError as exc:
+ bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
+
+ try:
+ oe.terminal.spawn_preferred(command, title)
+ except oe.terminal.NoSupportedTerminals:
+ bb.fatal('No valid terminal found, unable to open devshell')
+ except oe.terminal.ExecutionError as exc:
+ bb.fatal('Unable to spawn terminal %s: %s' % (terminal, exc))
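+
+# A user can force a particular terminal from local.conf; the name must match
+# one of the oe.terminal implementations, e.g. (value illustrative):
+#
+#   OE_TERMINAL = "screen"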
diff --git a/meta/classes/tinderclient.bbclass b/meta/classes/tinderclient.bbclass
index bc004efb2..1027c7cef 100644
--- a/meta/classes/tinderclient.bbclass
+++ b/meta/classes/tinderclient.bbclass
@@ -51,8 +51,7 @@ def tinder_format_http_post(d,status,log):
for the tinderbox to be happy.
"""
- from bb import data, build
- import os,random
+ import random
# the variables we will need to send on this form post
variables = {
@@ -125,7 +124,6 @@ def tinder_build_start(d):
report = report[report.find(search)+len(search):]
report = report[0:report.find("'")]
- import bb
bb.note("Machine ID assigned by tinderbox: %s" % report )
# now we will need to save the machine number
@@ -165,7 +163,6 @@ def tinder_print_info(d):
"""
from bb import data
- import os
# get the local vars
time = tinder_time_string()
@@ -216,7 +213,6 @@ def tinder_print_env():
Print the environment variables of this build
"""
from bb import data
- import os
time_start = tinder_time_string()
time_end = tinder_time_string()
@@ -278,7 +274,7 @@ def tinder_do_tinder_report(event):
"""
from bb.event import getName
from bb import data, mkdirhier, build
- import os, glob
+ import glob
# variables
name = getName(event)
@@ -371,14 +367,14 @@ def tinder_do_tinder_report(event):
addhandler tinderclient_eventhandler
python tinderclient_eventhandler() {
from bb import note, error, data
- from bb.event import NotHandled, getName
+ from bb.event import getName
if e.data is None or getName(e) == "MsgNote":
- return NotHandled
+ return
do_tinder_report = data.getVar('TINDER_REPORT', e.data, True)
if do_tinder_report and do_tinder_report == "1":
tinder_do_tinder_report(e)
- return NotHandled
+ return
}
diff --git a/meta/classes/tmake.bbclass b/meta/classes/tmake.bbclass
deleted file mode 100644
index dbd0bf276..000000000
--- a/meta/classes/tmake.bbclass
+++ /dev/null
@@ -1,77 +0,0 @@
-DEPENDS_prepend="tmake "
-
-python tmake_do_createpro() {
- import glob, sys
- from bb import note
- out_vartranslate = {
- "TMAKE_HEADERS": "HEADERS",
- "TMAKE_INTERFACES": "INTERFACES",
- "TMAKE_TEMPLATE": "TEMPLATE",
- "TMAKE_CONFIG": "CONFIG",
- "TMAKE_DESTDIR": "DESTDIR",
- "TMAKE_SOURCES": "SOURCES",
- "TMAKE_DEPENDPATH": "DEPENDPATH",
- "TMAKE_INCLUDEPATH": "INCLUDEPATH",
- "TMAKE_TARGET": "TARGET",
- "TMAKE_LIBS": "LIBS",
- }
- s = data.getVar('S', d, 1) or ""
- os.chdir(s)
- profiles = (data.getVar('TMAKE_PROFILES', d, 1) or "").split()
- if not profiles:
- profiles = ["*.pro"]
- for pro in profiles:
- ppro = glob.glob(pro)
- if ppro:
- if ppro != [pro]:
- del profiles[profiles.index(pro)]
- profiles += ppro
- continue
- if ppro[0].find('*'):
- del profiles[profiles.index(pro)]
- continue
- else:
- del profiles[profiles.index(pro)]
- if len(profiles) != 0:
- return
-
- # output .pro using this metadata store
- try:
- from __builtin__ import file
- profile = file(data.expand('${PN}.pro', d), 'w')
- except OSError:
- raise FuncFailed("unable to open pro file for writing.")
-
-# fd = sys.__stdout__
- fd = profile
- for var in out_vartranslate.keys():
- val = data.getVar(var, d, 1)
- if val:
- fd.write("%s\t: %s\n" % (out_vartranslate[var], val))
-
-# if fd is not sys.__stdout__:
- fd.close()
-}
-
-tmake_do_configure() {
- paths="${STAGING_DATADIR}/tmake/qws/${TARGET_OS}-${TARGET_ARCH}-g++ ${STAGING_DATADIR}/tmake/$OS-g++"
- if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
- paths="${STAGING_DATADIR}/tmake/qws/${TARGET_OS}-x86-g++ $paths"
- fi
- for i in $paths; do
- if test -e $i; then
- export TMAKEPATH=$i
- break
- fi
- done
-
- if [ -z "${TMAKE_PROFILES}" ]; then
- TMAKE_PROFILES="`ls *.pro`"
- fi
- tmake -o Makefile $TMAKE_PROFILES || die "Error calling tmake on ${TMAKE_PROFILES}"
-}
-
-EXPORT_FUNCTIONS do_configure do_createpro
-
-addtask configure after do_unpack do_patch before do_compile
-addtask createpro before do_configure after do_unpack do_patch
diff --git a/meta/classes/toolchain-scripts.bbclass b/meta/classes/toolchain-scripts.bbclass
new file mode 100644
index 000000000..2099c4def
--- /dev/null
+++ b/meta/classes/toolchain-scripts.bbclass
@@ -0,0 +1,148 @@
+inherit siteinfo
+
+# This function creates an environment-setup-script for use in a deployable SDK
+toolchain_create_sdk_env_script () {
+ # Create environment setup script
+ script=${SDK_OUTPUT}/${SDKPATH}/environment-setup-${MULTIMACH_TARGET_SYS}
+ rm -f $script
+ touch $script
+ echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/${MULTIMACH_TARGET_SYS}:$PATH' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=${SDKTARGETSYSROOT}' >> $script
+ echo 'export PKG_CONFIG_PATH=${SDKTARGETSYSROOT}${libdir}/pkgconfig' >> $script
+ echo 'export CONFIG_SITE=${SDKPATH}/site-config-${MULTIMACH_TARGET_SYS}' >> $script
+ echo 'export CC=${TARGET_PREFIX}gcc' >> $script
+ echo 'export CXX=${TARGET_PREFIX}g++' >> $script
+ echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
+ echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
+ echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=${SDKTARGETSYSROOT}"' >> $script
+ if [ "${TARGET_OS}" = "darwin8" ]; then
+ echo 'export TARGET_CFLAGS="-I${SDKTARGETSYSROOT}${includedir}"' >> $script
+ echo 'export TARGET_LDFLAGS="-L${SDKTARGETSYSROOT}${libdir}"' >> $script
+ # Workaround darwin toolchain sysroot path problems
+ cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
+ ln -s /usr/local local
+ fi
+ echo 'export CFLAGS="${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
+ echo 'export CXXFLAGS="${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
+ echo 'export LDFLAGS="${TARGET_LD_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
+ echo 'export CPPFLAGS="${TARGET_CC_ARCH} --sysroot=${SDKTARGETSYSROOT}"' >> $script
+ echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
+ echo 'export OECORE_TARGET_SYSROOT="${SDKTARGETSYSROOT}"' >> $script
+ echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
+ echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
+ echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
+}
+
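+# Once the SDK is installed, the user sources the generated script before
+# configuring software, e.g. (install path illustrative):
+#
+#   . /opt/sdk/environment-setup-armv5te-poky-linux-gnueabi
+#   ./configure ${CONFIGURE_FLAGS}
+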
+# This function creates an environment-setup-script in the TMPDIR which enables
+# an OE-core IDE to integrate with the build tree
+toolchain_create_tree_env_script () {
+ script=${TMPDIR}/environment-setup-${MULTIMACH_TARGET_SYS}
+ rm -f $script
+ touch $script
+ echo 'export PATH=${PATH}' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=${PKG_CONFIG_SYSROOT_DIR}' >> $script
+ echo 'export PKG_CONFIG_PATH=${PKG_CONFIG_PATH}' >> $script
+
+ echo 'export CONFIG_SITE="${@siteinfo_get_files(d)}"' >> $script
+
+ echo 'export CC=${TARGET_PREFIX}gcc' >> $script
+ echo 'export CXX=${TARGET_PREFIX}g++' >> $script
+ echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
+ echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
+ echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${BUILD_SYS} --with-libtool-sysroot=${STAGING_DIR_TARGET}"' >> $script
+ if [ "${TARGET_OS}" = "darwin8" ]; then
+ echo 'export TARGET_CFLAGS="-I${STAGING_DIR}${MACHINE}${includedir}"' >> $script
+ echo 'export TARGET_LDFLAGS="-L${STAGING_DIR}${MACHINE}${libdir}"' >> $script
+ # Workaround darwin toolchain sysroot path problems
+ cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
+ ln -s /usr/local local
+ fi
+ echo 'export CFLAGS="${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
+ echo 'export CXXFLAGS="${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
+ echo 'export LDFLAGS="${TARGET_LD_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
+ echo 'export CPPFLAGS="${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"' >> $script
+ echo 'export OECORE_NATIVE_SYSROOT="${STAGING_DIR_NATIVE}"' >> $script
+ echo 'export OECORE_TARGET_SYSROOT="${STAGING_DIR_TARGET}"' >> $script
+ echo 'export OECORE_ACLOCAL_OPTS="-I ${STAGING_DIR_NATIVE}/usr/share/aclocal"' >> $script
+ echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
+ echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
+}
+
+# This function creates an environment-setup-script for use by the ADT installer
+toolchain_create_sdk_env_script_for_installer () {
+ # Create environment setup script
+ local multimach_target_sys=$1
+ script=${SDK_OUTPUT}/${SDKPATH}/environment-setup-${multimach_target_sys}
+ rm -f $script
+ touch $script
+ echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:${SDKPATHNATIVE}${bindir_nativesdk}/'"${multimach_target_sys}"':$PATH' >> $script
+ echo 'export PKG_CONFIG_SYSROOT_DIR=##SDKTARGETSYSROOT##' >> $script
+ echo 'export PKG_CONFIG_PATH=##SDKTARGETSYSROOT##${target_libdir}/pkgconfig' >> $script
+ echo 'export CONFIG_SITE=${SDKPATH}/site-config-'"${multimach_target_sys}" >> $script
+ echo 'export CC=${TARGET_PREFIX}gcc' >> $script
+ echo 'export CXX=${TARGET_PREFIX}g++' >> $script
+ echo 'export GDB=${TARGET_PREFIX}gdb' >> $script
+ echo 'export TARGET_PREFIX=${TARGET_PREFIX}' >> $script
+ echo 'export CONFIGURE_FLAGS="--target=${TARGET_SYS} --host=${TARGET_SYS} --build=${SDK_ARCH}-linux --with-libtool-sysroot=##SDKTARGETSYSROOT##"' >> $script
+ if [ "${TARGET_OS}" = "darwin8" ]; then
+ echo 'export TARGET_CFLAGS="-I##SDKTARGETSYSROOT##${target_includedir}"' >> $script
+		echo 'export TARGET_LDFLAGS="-L##SDKTARGETSYSROOT##${target_libdir}"' >> $script
+ # Workaround darwin toolchain sysroot path problems
+ cd ${SDK_OUTPUT}${SDKTARGETSYSROOT}/usr
+ ln -s /usr/local local
+ fi
+ echo 'export CFLAGS="${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export CXXFLAGS="${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export LDFLAGS="${TARGET_LD_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export CPPFLAGS="${TARGET_CC_ARCH} --sysroot=##SDKTARGETSYSROOT##"' >> $script
+ echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script
+ echo 'export OECORE_TARGET_SYSROOT="##SDKTARGETSYSROOT##"' >> $script
+ echo 'export OECORE_ACLOCAL_OPTS="-I ${SDKPATHNATIVE}/usr/share/aclocal"' >> $script
+ echo 'export OECORE_DISTRO_VERSION="${DISTRO_VERSION}"' >> $script
+ echo 'export OECORE_SDK_VERSION="${SDK_VERSION}"' >> $script
+}
+
+# We get the cached site config at runtime
+TOOLCHAIN_CONFIGSITE_NOCACHE := "${@siteinfo_get_files(d, True)}"
+TOOLCHAIN_CONFIGSITE_SYSROOTCACHE := "${STAGING_DATADIR}/${TARGET_SYS}_config_site.d"
+TOOLCHAIN_NEED_CONFIGSITE_CACHE = "${TCLIBC} ncurses"
+
+# This function creates a site config file
+toolchain_create_sdk_siteconfig () {
+ local siteconfig=$1
+
+ rm -f $siteconfig
+ touch $siteconfig
+
+ for sitefile in ${TOOLCHAIN_CONFIGSITE_NOCACHE} ; do
+ cat $sitefile >> $siteconfig
+ done
+
+ #get cached site config
+ for sitefile in ${TOOLCHAIN_NEED_CONFIGSITE_CACHE}; do
+ if [ -r ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config ]; then
+ cat ${TOOLCHAIN_CONFIGSITE_SYSROOTCACHE}/${sitefile}_config >> $siteconfig
+ fi
+ done
+}
+# The immediate expansion above can result in unwanted path dependencies here
+toolchain_create_sdk_siteconfig[vardepsexclude] = "TOOLCHAIN_CONFIGSITE_SYSROOTCACHE"
+
+# This function creates a version information file
+toolchain_create_sdk_version () {
+ local versionfile=$1
+ rm -f $versionfile
+ touch $versionfile
+ echo 'Distro: ${DISTRO}' >> $versionfile
+ echo 'Distro Version: ${DISTRO_VERSION}' >> $versionfile
+ echo 'Metadata Revision: ${METADATA_REVISION}' >> $versionfile
+ echo 'Timestamp: ${DATETIME}' >> $versionfile
+}
+toolchain_create_sdk_version[vardepsexclude] = "DATETIME"
+
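+# The generated version file ends up looking something like (values
+# illustrative):
+#
+#   Distro: poky
+#   Distro Version: 1.1
+#   Metadata Revision: abc1234
+#   Timestamp: 20111005103000
+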
+python __anonymous () {
+ deps = ""
+ for dep in (d.getVar('TOOLCHAIN_NEED_CONFIGSITE_CACHE', True) or "").split():
+ deps += " %s:do_populate_sysroot" % dep
+ d.appendVarFlag('do_configure', 'depends', deps)
+}
diff --git a/meta/classes/typecheck.bbclass b/meta/classes/typecheck.bbclass
new file mode 100644
index 000000000..353532d73
--- /dev/null
+++ b/meta/classes/typecheck.bbclass
@@ -0,0 +1,12 @@
+# Check types of bitbake configuration variables
+#
+# See oe.types for details.
+
+python check_types() {
+ import oe.types
+ if isinstance(e, bb.event.ConfigParsed):
+ for key in e.data.keys():
+ if e.data.getVarFlag(key, "type"):
+ oe.data.typed_value(key, e.data)
+}
+addhandler check_types
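+
+# A variable opts in by declaring a type flag; parsing fails if its value does
+# not validate against the declared oe.types type, e.g. (names illustrative):
+#
+#   FOO = "maybe"
+#   FOO[type] = "boolean"    # parse error: "maybe" is not a valid boolean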
diff --git a/meta/classes/update-alternatives.bbclass b/meta/classes/update-alternatives.bbclass
index c63581c5d..7b0518d84 100644
--- a/meta/classes/update-alternatives.bbclass
+++ b/meta/classes/update-alternatives.bbclass
@@ -1,3 +1,41 @@
+# This class helps with the alternatives system, which is useful when
+# multiple sources provide the same command. You can use the
+# update-alternatives command directly in your recipe, but in most cases
+# this class simplifies that job.
+#
+# There are two basic modes supported: 'single update' and 'batch update'
+#
+# 'single update' is used for a single alternative command, and you're
+# expected to provide at least the keywords below:
+#
+# ALTERNATIVE_NAME - the name the alternative is registered under
+# ALTERNATIVE_PATH - the path of the installed alternative
+#
+# ALTERNATIVE_PRIORITY and ALTERNATIVE_LINK are optional and have defaults
+# in this class.
+#
+# 'batch update' is used if you have multiple alternatives to update.
+# Unlike 'single update', 'batch update' in most cases requires only two
+# parameters:
+#
+# ALTERNATIVE_LINKS - a list of symbolic links for which you'd like to
+#                     create alternatives, with space as the delimiter, e.g.:
+#
+#                     ALTERNATIVE_LINKS = "${bindir}/cmd1 ${sbindir}/cmd2 ..."
+#
+# ALTERNATIVE_PRIORITY - optional, applies to all links
+#
+# To simplify the design, this class assumes that for a name listed in
+# ALTERNATIVE_LINKS, say /path/cmd:
+#
+# the name of the alternative will be: cmd
+# the path of the installed alternative will be: /path/cmd.${PN}
+# ${D}/path/cmd will be renamed to ${D}/path/cmd.${PN} automatically
+# the priority will be taken from ALTERNATIVE_PRIORITY
+#
+# If the above assumptions don't match your requirements, you will still
+# need to use the update-alternatives command directly.
# defaults
ALTERNATIVE_PRIORITY = "10"
ALTERNATIVE_LINK = "${bindir}/${ALTERNATIVE_NAME}"
@@ -10,28 +48,69 @@ update_alternatives_postrm() {
update-alternatives --remove ${ALTERNATIVE_NAME} ${ALTERNATIVE_PATH}
}
+# For batch alternatives we use a simple approach requiring only one parameter,
+# with the rest of the info deduced implicitly
+update_alternatives_batch_postinst() {
+for link in ${ALTERNATIVE_LINKS}
+do
+ name=`basename ${link}`
+ path=${link}.${PN}
+ update-alternatives --install ${link} ${name} ${path} ${ALTERNATIVE_PRIORITY}
+done
+}
+
+update_alternatives_batch_postrm() {
+for link in ${ALTERNATIVE_LINKS}
+do
+ name=`basename ${link}`
+ path=${link}.${PN}
+ update-alternatives --remove ${name} $path
+done
+}
+
+update_alternatives_batch_doinstall() {
+ for link in ${ALTERNATIVE_LINKS}
+ do
+ mv ${D}${link} ${D}${link}.${PN}
+ done
+}
+
def update_alternatives_after_parse(d):
- import bb
- if bb.data.getVar('ALTERNATIVE_NAME', d) == None:
- raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_NAME" % bb.data.getVar('FILE', d)
- if bb.data.getVar('ALTERNATIVE_PATH', d) == None:
- raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_PATH" % bb.data.getVar('FILE', d)
+ if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
+ return
+
+ if d.getVar('ALTERNATIVE_LINKS') != None:
+ doinstall = d.getVar('do_install', 0)
+ doinstall += d.getVar('update_alternatives_batch_doinstall', 0)
+ d.setVar('do_install', doinstall)
+ return
+
+ if d.getVar('ALTERNATIVE_NAME') == None:
+ raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_NAME" % d.getVar('FILE')
+ if d.getVar('ALTERNATIVE_PATH') == None:
+ raise bb.build.FuncFailed, "%s inherits update-alternatives but doesn't set ALTERNATIVE_PATH" % d.getVar('FILE')
python __anonymous() {
update_alternatives_after_parse(d)
}
python populate_packages_prepend () {
- pkg = bb.data.getVar('PN', d, 1)
+ pkg = d.getVar('PN', True)
bb.note('adding update-alternatives calls to postinst/postrm for %s' % pkg)
- postinst = bb.data.getVar('pkg_postinst_%s' % pkg, d, 1) or bb.data.getVar('pkg_postinst', d, 1)
+ postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
postinst = '#!/bin/sh\n'
- postinst += bb.data.getVar('update_alternatives_postinst', d, 1)
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
- postrm = bb.data.getVar('pkg_postrm_%s' % pkg, d, 1) or bb.data.getVar('pkg_postrm', d, 1)
+ if d.getVar('ALTERNATIVE_LINKS') != None:
+ postinst += d.getVar('update_alternatives_batch_postinst', True)
+ else:
+ postinst += d.getVar('update_alternatives_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+ postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += bb.data.getVar('update_alternatives_postrm', d, 1)
- bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+ if d.getVar('ALTERNATIVE_LINKS') != None:
+ postrm += d.getVar('update_alternatives_batch_postrm', True)
+ else:
+ postrm += d.getVar('update_alternatives_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
}
diff --git a/meta/classes/update-rc.d.bbclass b/meta/classes/update-rc.d.bbclass
index 74053edb8..bddead4a2 100644
--- a/meta/classes/update-rc.d.bbclass
+++ b/meta/classes/update-rc.d.bbclass
@@ -1,5 +1,12 @@
-DEPENDS_append = " update-rc.d"
-RDEPENDS_${PN}_append = " update-rc.d"
+UPDATERCPN ?= "${PN}"
+
+DEPENDS_append = " update-rc.d-native"
+UPDATERCD = "update-rc.d"
+UPDATERCD_virtclass-cross = ""
+UPDATERCD_virtclass-native = ""
+UPDATERCD_virtclass-nativesdk = ""
+
+RDEPENDS_${UPDATERCPN}_append = " ${UPDATERCD}"
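+
+# A recipe providing an init script typically sets, e.g. (values illustrative):
+#
+# INITSCRIPT_NAME = "syslog"
+# INITSCRIPT_PARAMS = "start 20 2 3 4 5 . stop 90 0 1 6 ."
+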
INITSCRIPT_PARAMS ?= "defaults"
@@ -26,12 +33,11 @@ update-rc.d $D ${INITSCRIPT_NAME} remove
def update_rc_after_parse(d):
- import bb
- if bb.data.getVar('INITSCRIPT_PACKAGES', d) == None:
- if bb.data.getVar('INITSCRIPT_NAME', d) == None:
- raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % bb.data.getVar('FILE', d)
- if bb.data.getVar('INITSCRIPT_PARAMS', d) == None:
- raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % bb.data.getVar('FILE', d)
+ if d.getVar('INITSCRIPT_PACKAGES') == None:
+ if d.getVar('INITSCRIPT_NAME') == None:
+ raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_NAME" % d.getVar('FILE')
+ if d.getVar('INITSCRIPT_PARAMS') == None:
+ raise bb.build.FuncFailed, "%s inherits update-rc.d but doesn't set INITSCRIPT_PARAMS" % d.getVar('FILE')
python __anonymous() {
update_rc_after_parse(d)
@@ -41,32 +47,37 @@ python populate_packages_prepend () {
def update_rcd_package(pkg):
bb.debug(1, 'adding update-rc.d calls to postinst/postrm for %s' % pkg)
localdata = bb.data.createCopy(d)
- overrides = bb.data.getVar("OVERRIDES", localdata, 1)
- bb.data.setVar("OVERRIDES", "%s:%s" % (pkg, overrides), localdata)
+ overrides = localdata.getVar("OVERRIDES", True)
+ localdata.setVar("OVERRIDES", "%s:%s" % (pkg, overrides))
bb.data.update_data(localdata)
- postinst = '#!/bin/sh\n'
- postinst += bb.data.getVar('updatercd_postinst', localdata, 1)
- try:
- postinst += bb.data.getVar('pkg_postinst', localdata, 1)
- except:
- pass
- bb.data.setVar('pkg_postinst_%s' % pkg, postinst, d)
- prerm = bb.data.getVar('pkg_prerm', localdata, 1)
+        """
+        The update-rc.d postinst is appended here because pkg_postinst may need
+        to execute on the target. Not doing so may cause the update-rc.d postinst
+        to be invoked twice, causing unwanted warnings.
+        """
+ postinst = localdata.getVar('pkg_postinst', True)
+ if not postinst:
+ postinst = '#!/bin/sh\n'
+ postinst += localdata.getVar('updatercd_postinst', True)
+ d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+ prerm = localdata.getVar('pkg_prerm', True)
if not prerm:
prerm = '#!/bin/sh\n'
- prerm += bb.data.getVar('updatercd_prerm', localdata, 1)
- bb.data.setVar('pkg_prerm_%s' % pkg, prerm, d)
- postrm = bb.data.getVar('pkg_postrm', localdata, 1)
+ prerm += localdata.getVar('updatercd_prerm', True)
+ d.setVar('pkg_prerm_%s' % pkg, prerm)
+
+ postrm = localdata.getVar('pkg_postrm', True)
if not postrm:
postrm = '#!/bin/sh\n'
- postrm += bb.data.getVar('updatercd_postrm', localdata, 1)
- bb.data.setVar('pkg_postrm_%s' % pkg, postrm, d)
+ postrm += localdata.getVar('updatercd_postrm', True)
+ d.setVar('pkg_postrm_%s' % pkg, postrm)
- pkgs = bb.data.getVar('INITSCRIPT_PACKAGES', d, 1)
+ pkgs = d.getVar('INITSCRIPT_PACKAGES', True)
if pkgs == None:
- pkgs = bb.data.getVar('PN', d, 1)
- packages = (bb.data.getVar('PACKAGES', d, 1) or "").split()
+ pkgs = d.getVar('UPDATERCPN', True)
+ packages = (d.getVar('PACKAGES', True) or "").split()
if not pkgs in packages and packages != []:
pkgs = packages[0]
for pkg in pkgs.split():
diff --git a/meta/classes/useradd.bbclass b/meta/classes/useradd.bbclass
new file mode 100644
index 000000000..0ed91ad2c
--- /dev/null
+++ b/meta/classes/useradd.bbclass
@@ -0,0 +1,214 @@
+# base-passwd-cross provides the default passwd and group files in the
+# target sysroot, and shadow-native and shadow-sysroot provide the utilities
+# and support files needed to add and modify user and group accounts
+DEPENDS_append = "${USERADDDEPENDS}"
+USERADDDEPENDS = " base-passwd shadow-native shadow-sysroot shadow"
+USERADDDEPENDS_virtclass-cross = ""
+USERADDDEPENDS_virtclass-native = ""
+USERADDDEPENDS_virtclass-nativesdk = ""
+
+# This preinstall function can be run in four different contexts:
+#
+# a) Before do_install
+# b) At do_populate_sysroot_setscene when installing from sstate packages
+# c) As the preinst script in the target package at do_rootfs time
+# d) As the preinst script in the target package on device as a package upgrade
+#
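+# A typical recipe usage looks like (names and parameters illustrative):
+#
+#   inherit useradd
+#   USERADD_PACKAGES = "${PN}"
+#   USERADD_PARAM_${PN} = "--system --home /var/lib/foo --shell /bin/false foo"
+#   GROUPADD_PARAM_${PN} = "--system foo"
+#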
+useradd_preinst () {
+OPT=""
+SYSROOT=""
+
+if test "x$D" != "x"; then
+ # Installing into a sysroot
+ SYSROOT="$D"
+ OPT="--root $D"
+
+ # Add groups and users defined for all recipe packages
+ GROUPADD_PARAM="${@get_all_cmd_params(d, 'group')}"
+ USERADD_PARAM="${@get_all_cmd_params(d, 'user')}"
+else
+ # Installing onto a target
+ # Add groups and users defined only for this package
+ GROUPADD_PARAM="${GROUPADD_PARAM}"
+ USERADD_PARAM="${USERADD_PARAM}"
+fi
+
+# Perform group additions first, since user additions may depend
+# on these groups existing
+if test "x$GROUPADD_PARAM" != "x"; then
+ echo "Running groupadd commands..."
+ # Invoke multiple instances of groupadd for parameter lists
+ # separated by ';'
+ opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
+ remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
+ while test "x$opts" != "x"; do
+ groupname=`echo "$opts" | awk '{ print $NF }'`
+ group_exists=`grep "^$groupname:" $SYSROOT/etc/group || true`
+ if test "x$group_exists" = "x"; then
+ count=1
+ while true; do
+ eval $PSEUDO groupadd $OPT $opts || true
+ group_exists=`grep "^$groupname:" $SYSROOT/etc/group || true`
+ if test "x$group_exists" = "x"; then
+ # File locking issues can require us to retry the command
+ echo "WARNING: groupadd command did not succeed. Retrying..."
+ sleep 1
+ else
+ break
+ fi
+ count=`expr $count + 1`
+ if test $count = 11; then
+ echo "ERROR: tried running groupadd command 10 times without success, giving up"
+ exit 1
+ fi
+ done
+ else
+ echo "Note: group $groupname already exists, not re-creating it"
+ fi
+
+ if test "x$opts" = "x$remaining"; then
+ break
+ fi
+ opts=`echo "$remaining" | cut -d ';' -f 1`
+ remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ done
+fi
+
+if test "x$USERADD_PARAM" != "x"; then
+ echo "Running useradd commands..."
+ # Invoke multiple instances of useradd for parameter lists
+ # separated by ';'
+ opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
+ remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
+ while test "x$opts" != "x"; do
+ # useradd does not have a -f option, so we have to check if the
+ # username already exists manually
+ username=`echo "$opts" | awk '{ print $NF }'`
+ user_exists=`grep "^$username:" $SYSROOT/etc/passwd || true`
+ if test "x$user_exists" = "x"; then
+ count=1
+ while true; do
+ eval $PSEUDO useradd $OPT $opts || true
+ user_exists=`grep "^$username:" $SYSROOT/etc/passwd || true`
+ if test "x$user_exists" = "x"; then
+ # File locking issues can require us to retry the command
+ echo "WARNING: useradd command did not succeed. Retrying..."
+ sleep 1
+ else
+ break
+ fi
+ count=`expr $count + 1`
+ if test $count = 11; then
+ echo "ERROR: tried running useradd command 10 times without success, giving up"
+ exit 1
+ fi
+ done
+ else
+ echo "Note: username $username already exists, not re-creating it"
+ fi
+
+ if test "x$opts" = "x$remaining"; then
+ break
+ fi
+ opts=`echo "$remaining" | cut -d ';' -f 1`
+ remaining=`echo "$remaining" | cut -d ';' -f 2-`
+ done
+fi
+}
+
+useradd_sysroot () {
+	# Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
+	# at this point, so we're explicit about the environment to let pseudo load if
+	# it isn't already present.
+ export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+
+ # Explicitly set $D since it isn't set to anything
+ # before do_install
+ D=${STAGING_DIR_TARGET}
+ useradd_preinst
+}
+
+useradd_sysroot_sstate () {
+ if [ "${BB_CURRENTTASK}" = "package_setscene" ]
+ then
+ useradd_sysroot
+ fi
+}
+
+do_install[prefuncs] += "${SYSROOTFUNC}"
+SYSROOTFUNC = "useradd_sysroot"
+SYSROOTFUNC_virtclass-cross = ""
+SYSROOTFUNC_virtclass-native = ""
+SYSROOTFUNC_virtclass-nativesdk = ""
+SSTATEPREINSTFUNCS += "${SYSROOTPOSTFUNC}"
+SYSROOTPOSTFUNC = "useradd_sysroot_sstate"
+SYSROOTPOSTFUNC_virtclass-cross = ""
+SYSROOTPOSTFUNC_virtclass-native = ""
+SYSROOTPOSTFUNC_virtclass-nativesdk = ""
+
+USERADDSETSCENEDEPS = "base-passwd:do_populate_sysroot_setscene shadow-native:do_populate_sysroot_setscene ${MLPREFIX}shadow-sysroot:do_populate_sysroot_setscene"
+USERADDSETSCENEDEPS_virtclass-cross = ""
+USERADDSETSCENEDEPS_virtclass-native = ""
+USERADDSETSCENEDEPS_virtclass-nativesdk = ""
+do_package_setscene[depends] = "${USERADDSETSCENEDEPS}"
+
+# Recipe parse-time sanity checks
+def update_useradd_after_parse(d):
+ useradd_packages = d.getVar('USERADD_PACKAGES', True)
+
+ if not useradd_packages:
+ raise bb.build.FuncFailed, "%s inherits useradd but doesn't set USERADD_PACKAGES" % d.getVar('FILE')
+
+ for pkg in useradd_packages.split():
+ if not d.getVar('USERADD_PARAM_%s' % pkg, True) and not d.getVar('GROUPADD_PARAM_%s' % pkg, True):
+ raise bb.build.FuncFailed, "%s inherits useradd but doesn't set USERADD_PARAM or GROUPADD_PARAM for package %s" % (d.getVar('FILE'), pkg)
+
+python __anonymous() {
+ update_useradd_after_parse(d)
+}
+
+# Return a single [GROUP|USER]ADD_PARAM formatted string which includes the
+# [group|user]add parameters for all USERADD_PACKAGES in this recipe
+def get_all_cmd_params(d, cmd_type):
+ import string
+
+ param_type = cmd_type.upper() + "ADD_PARAM_%s"
+ params = []
+
+ useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ for pkg in useradd_packages.split():
+ param = d.getVar(param_type % pkg, True)
+ if param:
+ params.append(param)
+
+ return string.join(params, "; ")
+
+# Adds the preinst script into generated packages
+fakeroot python populate_packages_prepend () {
+ def update_useradd_package(pkg):
+ bb.debug(1, 'adding user/group calls to preinst for %s' % pkg)
+
+        """
+        The useradd preinst is appended here because pkg_preinst may need
+        to execute on the target. Not doing so may cause the useradd preinst
+        to be invoked twice, causing unwanted warnings.
+        """
+ preinst = d.getVar('pkg_preinst_%s' % pkg, True) or d.getVar('pkg_preinst', True)
+ if not preinst:
+ preinst = '#!/bin/sh\n'
+ preinst += d.getVar('useradd_preinst', True)
+ d.setVar('pkg_preinst_%s' % pkg, preinst)
+
+ # RDEPENDS setup
+ rdepends = d.getVar("RDEPENDS_%s" % pkg, True) or ""
+ rdepends += ' ' + d.getVar('MLPREFIX') + 'base-passwd'
+ rdepends += ' ' + d.getVar('MLPREFIX') + 'shadow'
+ d.setVar("RDEPENDS_%s" % pkg, rdepends)
+
+ # Add the user/group preinstall scripts and RDEPENDS requirements
+ # to packages specified by USERADD_PACKAGES
+ if not bb.data.inherits_class('nativesdk', d):
+ useradd_packages = d.getVar('USERADD_PACKAGES', True) or ""
+ for pkg in useradd_packages.split():
+ update_useradd_package(pkg)
+}
diff --git a/meta/classes/utility-tasks.bbclass b/meta/classes/utility-tasks.bbclass
new file mode 100644
index 000000000..cbb000a1e
--- /dev/null
+++ b/meta/classes/utility-tasks.bbclass
@@ -0,0 +1,65 @@
+addtask listtasks
+do_listtasks[nostamp] = "1"
+python do_listtasks() {
+ import sys
+ # emit variables and shell functions
+ #bb.data.emit_env(sys.__stdout__, d)
+    # emit the metadata which isn't valid shell
+ for e in d.keys():
+ if d.getVarFlag(e, 'task'):
+ bb.plain("%s" % e)
+}
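+
+# Typically invoked for a given recipe as:
+#
+#   bitbake -c listtasks <recipe-name>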
+
+CLEANFUNCS ?= ""
+
+addtask clean
+do_clean[nostamp] = "1"
+python do_clean() {
+ """clear the build and temp directories"""
+ dir = d.expand("${WORKDIR}")
+ bb.note("Removing " + dir)
+ oe.path.remove(dir)
+
+ dir = "%s.*" % bb.data.expand(d.getVar('STAMP'), d)
+ bb.note("Removing " + dir)
+ oe.path.remove(dir)
+
+ for f in (d.getVar('CLEANFUNCS', True) or '').split():
+ bb.build.exec_func(f, d)
+}
+
+addtask checkuri
+do_checkuri[nostamp] = "1"
+python do_checkuri() {
+ src_uri = (d.getVar('SRC_URI', True) or "").split()
+ if len(src_uri) == 0:
+ return
+
+ localdata = bb.data.createCopy(d)
+ bb.data.update_data(localdata)
+
+ try:
+ fetcher = bb.fetch2.Fetch(src_uri, localdata)
+ fetcher.checkstatus()
+ except bb.fetch2.BBFetchException, e:
+ raise bb.build.FuncFailed(e)
+}
+
+addtask checkuriall after do_checkuri
+do_checkuriall[recrdeptask] = "do_checkuri"
+do_checkuriall[nostamp] = "1"
+do_checkuriall() {
+ :
+}
+
+addtask fetchall after do_fetch
+do_fetchall[recrdeptask] = "do_fetch"
+do_fetchall() {
+ :
+}
+
+addtask buildall after do_build
+do_buildall[recrdeptask] = "do_build"
+do_buildall() {
+ :
+}
diff --git a/meta/classes/utils.bbclass b/meta/classes/utils.bbclass
new file mode 100644
index 000000000..fde8f446a
--- /dev/null
+++ b/meta/classes/utils.bbclass
@@ -0,0 +1,385 @@
+# For compatibility
+def base_path_join(a, *p):
+ return oe.path.join(a, *p)
+
+def base_path_relative(src, dest):
+ return oe.path.relative(src, dest)
+
+def base_path_out(path, d):
+ return oe.path.format_display(path, d)
+
+def base_read_file(filename):
+ return oe.utils.read_file(filename)
+
+def base_ifelse(condition, iftrue = True, iffalse = False):
+ return oe.utils.ifelse(condition, iftrue, iffalse)
+
+def base_conditional(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.conditional(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+ return oe.utils.version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d)
+
+def base_contains(variable, checkvalues, truevalue, falsevalue, d):
+ return oe.utils.contains(variable, checkvalues, truevalue, falsevalue, d)
+
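+# base_contains is typically used inline in recipe metadata, e.g.
+# (configure flags illustrative):
+#
+#   EXTRA_OECONF += "${@base_contains('DISTRO_FEATURES', 'x11', '--with-x', '--without-x', d)}"
+#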
+def base_both_contain(variable1, variable2, checkvalue, d):
+ return oe.utils.both_contain(variable1, variable2, checkvalue, d)
+
+def base_prune_suffix(var, suffixes, d):
+ return oe.utils.prune_suffix(var, suffixes, d)
+
+def oe_filter(f, str, d):
+ return oe.utils.str_filter(f, str, d)
+
+def oe_filter_out(f, str, d):
+ return oe.utils.str_filter_out(f, str, d)
+
+def machine_paths(d):
+ """List any existing machine specific filespath directories"""
+ machine = d.getVar("MACHINE", True)
+ filespathpkg = d.getVar("FILESPATHPKG", True).split(":")
+ for basepath in d.getVar("FILESPATHBASE", True).split(":"):
+ for pkgpath in filespathpkg:
+ machinepath = os.path.join(basepath, pkgpath, machine)
+ if os.path.isdir(machinepath):
+ yield machinepath
+
+def is_machine_specific(d):
+ """Determine whether the current recipe is machine specific"""
+ machinepaths = set(machine_paths(d))
+    srcuri = d.getVar("SRC_URI", True).split()
+    fetcher = bb.fetch2.Fetch(srcuri, d)
+    for url in srcuri:
+        if url.startswith("file://"):
+            if any(fetcher.localpath(url).startswith(mp + "/") for mp in machinepaths):
+ return True
+
+def oe_popen_env(d):
+ env = d.getVar("__oe_popen_env", False)
+ if env is None:
+ env = {}
+ for v in d.keys():
+ if d.getVarFlag(v, "export"):
+ env[v] = d.getVar(v, True) or ""
+ d.setVar("__oe_popen_env", env)
+ return env
+
+def oe_run(d, cmd, **kwargs):
+ import oe.process
+ kwargs["env"] = oe_popen_env(d)
+ return oe.process.run(cmd, **kwargs)
+
+def oe_popen(d, cmd, **kwargs):
+ import oe.process
+ kwargs["env"] = oe_popen_env(d)
+ return oe.process.Popen(cmd, **kwargs)
+
+def oe_system(d, cmd, **kwargs):
+ """ Popen based version of os.system. """
+    if "shell" not in kwargs:
+ kwargs["shell"] = True
+ return oe_popen(d, cmd, **kwargs).wait()
+
+oe_soinstall() {
+ # Purpose: Install shared library file and
+ # create the necessary links
+ # Example:
+ #
+ # oe_
+ #
+ #bbnote installing shared library $1 to $2
+ #
+ libname=`basename $1`
+ install -m 755 $1 $2/$libname
+ sonamelink=`${HOST_PREFIX}readelf -d $1 |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $libname | sed -e 's/\.so\..*/.so/'`
+ ln -sf $libname $2/$sonamelink
+ ln -sf $libname $2/$solink
+}
+
+oe_libinstall() {
+ # Purpose: Install a library, in all its forms
+ # Example
+ #
+ # oe_libinstall libltdl ${STAGING_LIBDIR}/
+ # oe_libinstall -C src/libblah libblah ${D}/${libdir}/
+ dir=""
+ libtool=""
+ silent=""
+ require_static=""
+ require_shared=""
+ staging_install=""
+ while [ "$#" -gt 0 ]; do
+ case "$1" in
+ -C)
+ shift
+ dir="$1"
+ ;;
+ -s)
+ silent=1
+ ;;
+ -a)
+ require_static=1
+ ;;
+ -so)
+ require_shared=1
+ ;;
+ -*)
+ bbfatal "oe_libinstall: unknown option: $1"
+ ;;
+ *)
+ break;
+ ;;
+ esac
+ shift
+ done
+
+ libname="$1"
+ shift
+ destpath="$1"
+ if [ -z "$destpath" ]; then
+ bbfatal "oe_libinstall: no destination path specified"
+ fi
+ if echo "$destpath/" | egrep '^${STAGING_LIBDIR}/' >/dev/null
+ then
+ staging_install=1
+ fi
+
+ __runcmd () {
+ if [ -z "$silent" ]; then
+ echo >&2 "oe_libinstall: $*"
+ fi
+ $*
+ }
+
+ if [ -z "$dir" ]; then
+ dir=`pwd`
+ fi
+
+ dotlai=$libname.lai
+
+ # Sanity check that the libname.lai is unique
+ number_of_files=`(cd $dir; find . -name "$dotlai") | wc -l`
+ if [ $number_of_files -gt 1 ]; then
+ bbfatal "oe_libinstall: $dotlai is not unique in $dir"
+ fi
+
+
+ dir=$dir`(cd $dir;find . -name "$dotlai") | sed "s/^\.//;s/\/$dotlai\$//;q"`
+ olddir=`pwd`
+ __runcmd cd $dir
+
+ lafile=$libname.la
+
+	# If no such file exists, try cutting the version suffix
+	if [ ! -f "$lafile" ]; then
+		libname1=`echo "$libname" | sed 's/-[0-9.]*$//'`
+		lafile1=$libname1.la
+ if [ -f "$lafile1" ]; then
+ libname=$libname1
+ lafile=$lafile1
+ fi
+ fi
+
+ if [ -f "$lafile" ]; then
+ # libtool archive
+ eval `cat $lafile|grep "^library_names="`
+ libtool=1
+ else
+ library_names="$libname.so* $libname.dll.a $libname.*.dylib"
+ fi
+
+ __runcmd install -d $destpath/
+ dota=$libname.a
+ if [ -f "$dota" -o -n "$require_static" ]; then
+ rm -f $destpath/$dota
+ __runcmd install -m 0644 $dota $destpath/
+ fi
+ if [ -f "$dotlai" -a -n "$libtool" ]; then
+ rm -f $destpath/$libname.la
+ __runcmd install -m 0644 $dotlai $destpath/$libname.la
+ fi
+
+ for name in $library_names; do
+ files=`eval echo $name`
+ for f in $files; do
+ if [ ! -e "$f" ]; then
+ if [ -n "$libtool" ]; then
+ bbfatal "oe_libinstall: $dir/$f not found."
+ fi
+ elif [ -L "$f" ]; then
+ __runcmd cp -P "$f" $destpath/
+ elif [ ! -L "$f" ]; then
+ libfile="$f"
+ rm -f $destpath/$libfile
+ __runcmd install -m 0755 $libfile $destpath/
+ fi
+ done
+ done
+
+ if [ -z "$libfile" ]; then
+ if [ -n "$require_shared" ]; then
+ bbfatal "oe_libinstall: unable to locate shared library"
+ fi
+ elif [ -z "$libtool" ]; then
+ # special case hack for non-libtool .so.#.#.# links
+ baselibfile=`basename "$libfile"`
+ if (echo $baselibfile | grep -qE '^lib.*\.so\.[0-9.]*$'); then
+ sonamelink=`${HOST_PREFIX}readelf -d $libfile |grep 'Library soname:' |sed -e 's/.*\[\(.*\)\].*/\1/'`
+ solink=`echo $baselibfile | sed -e 's/\.so\..*/.so/'`
+ if [ -n "$sonamelink" -a x"$baselibfile" != x"$sonamelink" ]; then
+ __runcmd ln -sf $baselibfile $destpath/$sonamelink
+ fi
+ __runcmd ln -sf $baselibfile $destpath/$solink
+ fi
+ fi
+
+ __runcmd cd "$olddir"
+}
+
+oe_machinstall() {
+ # Purpose: Install machine dependent files, if available
+ # If not available, check if there is a default
+ # If no default, just touch the destination
+ # Example:
+ # $1 $2 $3 $4
+ # oe_machinstall -m 0644 fstab ${D}/etc/fstab
+ #
+ # TODO: Check argument number?
+ #
+ filename=`basename $3`
+ dirname=`dirname $3`
+
+ for o in `echo ${OVERRIDES} | tr ':' ' '`; do
+ if [ -e $dirname/$o/$filename ]; then
+ bbnote $dirname/$o/$filename present, installing to $4
+ install $1 $2 $dirname/$o/$filename $4
+ return
+ fi
+ done
+# bbnote overrides specific file NOT present, trying default=$3...
+ if [ -e $3 ]; then
+ bbnote $3 present, installing to $4
+ install $1 $2 $3 $4
+ else
+ bbnote $3 NOT present, touching empty $4
+ touch $4
+ fi
+}
+
+create_cmdline_wrapper () {
+	# Create a wrapper script where extra command line options are needed
+	#
+	# These are useful to work around relocation issues, by passing extra options
+	# to the binary telling it where relocated paths are.
+	#
+	# Usage: create_cmdline_wrapper FILENAME <extra-options>
+
+ cmd=$1
+ shift
+
+ echo "Generating wrapper script for $cmd"
+
+ mv $cmd $cmd.real
+ cmdname=`basename $cmd`.real
+ cat <<END >$cmd
+#!/bin/sh
+realpath=\`readlink -fn \$0\`
+exec \`dirname \$realpath\`/$cmdname $@ "\$@"
+END
+ chmod +x $cmd
+}
+
+create_wrapper () {
+ # Create a wrapper script
+ #
+ # These are useful to work around relocation issues, by setting environment
+ # variables which point to paths in the filesystem.
+ #
+ # Usage: create_wrapper FILENAME [[VAR=VALUE]..]
+
+ cmd=$1
+ shift
+
+ echo "Generating wrapper script for $cmd"
+
+ mv $cmd $cmd.real
+ cmdname=`basename $cmd`.real
+ cat <<END >$cmd
+#!/bin/sh
+realpath=\`readlink -fn \$0\`
+exec env $@ \`dirname \$realpath\`/$cmdname "\$@"
+END
+ chmod +x $cmd
+}
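+
+# For example (path and variable illustrative):
+#
+#   create_wrapper ${D}${bindir}/foo FOODATADIR=${datadir}/foo
+#
+# renames foo to foo.real and generates a small script in its place that
+# exports FOODATADIR before exec'ing the real binary.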
+
+def check_app_exists(app, d):
+ from bb import which, data
+
+ app = data.expand(app, d)
+ path = data.getVar('PATH', d, 1)
+ return bool(which(path, app))
+
+def explode_deps(s):
+ return bb.utils.explode_deps(s)
+
+def base_set_filespath(path, d):
+ filespath = []
+ extrapaths = (d.getVar("FILESEXTRAPATHS", True) or "")
+ # Don't prepend empty strings to the path list
+ if extrapaths != "":
+ path = extrapaths.split(":") + path
+ # The ":" ensures we have an 'empty' override
+ overrides = (d.getVar("OVERRIDES", True) or "") + ":"
+ for p in path:
+ if p != "":
+ for o in overrides.split(":"):
+ filespath.append(os.path.join(p, o))
+ return ":".join(filespath)
+
+def extend_variants(d, var, extend, delim=':'):
+ """Return a string of all bb class extend variants for the given extend"""
+ variants = []
+ whole = d.getVar(var, True) or ""
+ for ext in whole.split():
+ eext = ext.split(delim)
+ if len(eext) > 1 and eext[0] == extend:
+ variants.append(eext[1])
+ return " ".join(variants)
+
+def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
+ """Return a string of all ${var} in all multilib tune configuration"""
+ values = []
+ value = d.getVar(var, True) or ""
+ if value != "":
+ if need_split:
+ for item in value.split(delim):
+ values.append(item)
+ else:
+ values.append(value)
+ variants = d.getVar("MULTILIB_VARIANTS", True) or ""
+ for item in variants.split():
+ localdata = bb.data.createCopy(d)
+ overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
+ localdata.setVar("OVERRIDES", overrides)
+ bb.data.update_data(localdata)
+ value = localdata.getVar(var, True) or ""
+ if value != "":
+ if need_split:
+ for item in value.split(delim):
+ values.append(item)
+ else:
+ values.append(value)
+ if unique:
+ #we do this to keep order as much as possible
+ ret = []
+ for value in values:
+ if not value in ret:
+ ret.append(value)
+ else:
+ ret = values
+ return " ".join(ret)
diff --git a/meta/classes/xfce.bbclass b/meta/classes/xfce.bbclass
deleted file mode 100644
index ecc00825b..000000000
--- a/meta/classes/xfce.bbclass
+++ /dev/null
@@ -1,20 +0,0 @@
-# xfce.oeclass
-# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
-# Released under the MIT license (see packages/COPYING)
-
-# Global class to make it easier to maintain XFCE packages
-
-HOMEPAGE = "http://www.xfce.org"
-LICENSE = "LGPL-2"
-DEPENDS += "startup-notification"
-
-SRC_URI = "http://www.us.xfce.org/archive/xfce-${PV}/src/${PN}-${PV}.tar.bz2"
-
-inherit autotools
-
-EXTRA_OECONF += "--with-pluginsdir=${libdir}/xfce4/panel-plugins/"
-
-# FIXME: Put icons in their own package too?
-
-FILES_${PN} += "${datadir}/icons/* ${datadir}/applications/* ${libdir}/xfce4/modules/*.so*"
-FILES_${PN}-doc += "${datadir}/xfce4/doc"
diff --git a/meta/classes/xlibs.bbclass b/meta/classes/xlibs.bbclass
deleted file mode 100644
index ae8f928f1..000000000
--- a/meta/classes/xlibs.bbclass
+++ /dev/null
@@ -1,15 +0,0 @@
-LICENSE= "BSD-X"
-SECTION = "x11/libs"
-
-XLIBS_CVS = "${FREEDESKTOP_CVS}/xlibs"
-
-inherit autotools pkgconfig
-
-do_stage() {
- oe_runmake install prefix=${STAGING_DIR_HOST}${layout_prefix} \
- bindir=${STAGING_BINDIR} \
- includedir=${STAGING_INCDIR} \
- libdir=${STAGING_LIBDIR} \
- datadir=${STAGING_DATADIR} \
- mandir=${STAGING_DIR_HOST}${layout_mandir}
-}