summaryrefslogtreecommitdiff
path: root/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'scripts')
-rw-r--r--scripts/README2
-rwxr-xr-xscripts/bitbake161
-rwxr-xr-xscripts/bitbake-prserv-tool63
-rwxr-xr-xscripts/buildhistory-diff58
-rwxr-xr-xscripts/cleanup-workdir150
-rwxr-xr-xscripts/combo-layer422
-rwxr-xr-xscripts/combo-layer-hook-default.sh13
-rw-r--r--scripts/combo-layer.conf.example56
-rwxr-xr-xscripts/contrib/bb-perf/bb-matrix-plot.sh137
-rwxr-xr-xscripts/contrib/bb-perf/bb-matrix.sh78
-rwxr-xr-xscripts/contrib/bbvars.py186
-rwxr-xr-xscripts/contrib/ddimage87
-rwxr-xr-xscripts/contrib/documentation-audit.sh93
-rwxr-xr-xscripts/contrib/python/generate-manifest-2.7.py385
-rwxr-xr-xscripts/contrib/test_build_time.sh237
-rwxr-xr-xscripts/contrib/test_build_time_worker.sh37
-rwxr-xr-xscripts/create-lsb-image228
-rwxr-xr-xscripts/create-pull-request231
-rwxr-xr-xscripts/create-recipe1926
-rwxr-xr-xscripts/gen-site-config53
-rwxr-xr-xscripts/help2man3
-rwxr-xr-xscripts/hob6
-rwxr-xr-xscripts/jhbuild/jhbuild2oe.py28
-rw-r--r--scripts/multilib_header_wrapper.h55
-rwxr-xr-xscripts/native-intercept/chown2
-rwxr-xr-xscripts/oe-buildenv-internal78
-rwxr-xr-xscripts/oe-find-native-sysroot72
-rwxr-xr-xscripts/oe-git-proxy-command (renamed from scripts/poky-git-proxy-command)0
-rwxr-xr-xscripts/oe-git-proxy-socks-command23
-rw-r--r--scripts/oe-git-proxy-socks.c (renamed from scripts/poky-git-proxy-socks.c)0
-rwxr-xr-xscripts/oe-setup-builddir126
-rwxr-xr-xscripts/oe-setup-rpmrepo96
-rwxr-xr-xscripts/oe-trim-schemas49
-rwxr-xr-xscripts/poky-chroot-run86
-rwxr-xr-xscripts/poky-chroot-setup31
-rwxr-xr-xscripts/poky-env-internal122
-rwxr-xr-xscripts/poky-git-proxy-socks-command2
-rwxr-xr-xscripts/poky-nokia800-flashutil64
-rwxr-xr-xscripts/poky-qemu62
-rwxr-xr-xscripts/poky-qemu-ifdown27
-rwxr-xr-xscripts/poky-qemu-ifup37
-rwxr-xr-xscripts/poky-qemu-internal248
-rw-r--r--scripts/poky-qemu.README90
-rwxr-xr-xscripts/qemuimage-testlib778
-rwxr-xr-xscripts/qemuimage-tests/sanity/boot29
-rwxr-xr-xscripts/qemuimage-tests/sanity/compiler52
-rwxr-xr-xscripts/qemuimage-tests/sanity/connman52
-rwxr-xr-xscripts/qemuimage-tests/sanity/dmesg52
-rwxr-xr-xscripts/qemuimage-tests/sanity/rpm_query52
-rwxr-xr-xscripts/qemuimage-tests/sanity/scp71
-rwxr-xr-xscripts/qemuimage-tests/sanity/shutdown76
-rwxr-xr-xscripts/qemuimage-tests/sanity/ssh39
-rwxr-xr-xscripts/qemuimage-tests/sanity/zypper_help52
-rwxr-xr-xscripts/qemuimage-tests/sanity/zypper_search52
-rw-r--r--scripts/qemuimage-tests/scenario/qemuarm/core-image-lsb7
-rw-r--r--scripts/qemuimage-tests/scenario/qemuarm/core-image-minimal1
-rw-r--r--scripts/qemuimage-tests/scenario/qemuarm/core-image-sato8
-rw-r--r--scripts/qemuimage-tests/scenario/qemuarm/core-image-sato-sdk9
-rw-r--r--scripts/qemuimage-tests/scenario/qemuarm/meta-toolchain-gmae3
-rw-r--r--scripts/qemuimage-tests/scenario/qemumips/core-image-lsb7
-rw-r--r--scripts/qemuimage-tests/scenario/qemumips/core-image-minimal1
-rw-r--r--scripts/qemuimage-tests/scenario/qemumips/core-image-sato8
-rw-r--r--scripts/qemuimage-tests/scenario/qemumips/core-image-sato-sdk9
-rw-r--r--scripts/qemuimage-tests/scenario/qemumips/meta-toolchain-gmae3
-rw-r--r--scripts/qemuimage-tests/scenario/qemuppc/core-image-lsb7
-rw-r--r--scripts/qemuimage-tests/scenario/qemuppc/core-image-minimal1
-rw-r--r--scripts/qemuimage-tests/scenario/qemuppc/core-image-sato8
-rw-r--r--scripts/qemuimage-tests/scenario/qemuppc/core-image-sato-sdk9
-rw-r--r--scripts/qemuimage-tests/scenario/qemuppc/meta-toolchain-gmae3
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86-64/core-image-lsb7
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86-64/core-image-minimal1
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato8
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato-sdk9
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86-64/meta-toolchain-gmae3
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86/core-image-lsb7
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86/core-image-minimal1
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86/core-image-sato8
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86/core-image-sato-sdk9
-rw-r--r--scripts/qemuimage-tests/scenario/qemux86/meta-toolchain-gmae3
-rwxr-xr-xscripts/qemuimage-tests/toolchain/cvs31
-rwxr-xr-xscripts/qemuimage-tests/toolchain/iptables31
-rwxr-xr-xscripts/qemuimage-tests/toolchain/sudoku-savant31
-rw-r--r--scripts/qemuimage-tests/tools/compiler_test.sh137
-rw-r--r--scripts/qemuimage-tests/tools/connman_test.sh75
-rw-r--r--scripts/qemuimage-tests/tools/dmesg.sh26
-rw-r--r--scripts/qemuimage-tests/tools/rpm_test.sh45
-rw-r--r--scripts/qemuimage-tests/tools/zypper_test.sh45
-rw-r--r--scripts/rootfs_rpm-extract-postinst.awk11
-rwxr-xr-xscripts/rpm-createsolvedb.py63
-rwxr-xr-xscripts/rpm2cpio.sh53
-rwxr-xr-xscripts/runqemu427
-rwxr-xr-xscripts/runqemu-addptable2image (renamed from scripts/poky-addptable2image)0
-rwxr-xr-xscripts/runqemu-export-rootfs191
-rwxr-xr-xscripts/runqemu-extract-sdk100
-rwxr-xr-xscripts/runqemu-gen-tapdevs89
-rwxr-xr-xscripts/runqemu-ifdown52
-rwxr-xr-xscripts/runqemu-ifup114
-rwxr-xr-xscripts/runqemu-internal505
-rw-r--r--scripts/runqemu.README42
-rwxr-xr-xscripts/send-pull-request173
-rwxr-xr-xscripts/sstate-cache-management.sh326
-rwxr-xr-xscripts/swabber-strace-attach30
-rwxr-xr-xscripts/test-reexec123
103 files changed, 9203 insertions, 872 deletions
diff --git a/scripts/README b/scripts/README
index 5363c9b4e..1b8d12724 100644
--- a/scripts/README
+++ b/scripts/README
@@ -1 +1 @@
-This directory contains Various useful scripts for working with poky
+This directory contains various useful scripts for working with OE builds
diff --git a/scripts/bitbake b/scripts/bitbake
new file mode 100755
index 000000000..3772d826f
--- /dev/null
+++ b/scripts/bitbake
@@ -0,0 +1,161 @@
+#!/bin/sh
+
+# This is the bitbake wrapper script that ensures everything is set up
+# correctly in the environment, builds pseudo separately if it hasn't
+# already been built, and then runs bitbake within pseudo.
+
+export BBFETCH2=True
+export BB_ENV_EXTRAWHITE="PSEUDO_BUILD PSEUDO_DISABLED $BB_ENV_EXTRAWHITE"
+
+# For certain operations (i.e. that won't be actually running any tasks)
+# we don't need pseudo
+NO_BUILD_OPTS="--version -h --help -p --parse-only -s --show-versions -e --environment -g --graphviz"
+
+# Some options are useful to pass through to the initial pseudo build if
+# that needs to be run (for debugging)
+PASSTHROUGH_OPTS="-D -DD -DDD -DDDD -v"
+
+needpseudo="1"
+for opt in $@; do
+for key in $NO_BUILD_OPTS; do
+ if [ $opt = $key ]
+ then
+ needpseudo="0"
+ break
+ fi
+done
+[ $needpseudo = "0" ] && break
+done
+
+# Make sure we're not using python v3.x. This check can't go into
+# sanity.bbclass because bitbake's source code doesn't even pass
+# parsing stage when used with python v3, so we catch it here so we
+# can offer a meaningful error message.
+py_v3_check=`/usr/bin/env python --version 2>&1 | grep "Python 3"`
+if [ "$py_v3_check" != "" ]; then
+ echo "Bitbake is not compatible with python v3"
+ echo "Please set up python v2 as your default python interpreter"
+ exit 1
+fi
+
+# Similarly, we now have code that doesn't parse correctly with older
+# versions of Python, and rather than fixing that and being eternally
+# vigilant for any other new feature use, just check the version here.
+py_v26_check=`python -c 'import sys; print sys.version_info >= (2,6,0)'`
+if [ "$py_v26_check" != "True" ]; then
+ echo "BitBake requires Python 2.6 or later"
+ exit 1
+fi
+
+if [ ! -e conf/bblayers.conf ] ; then
+ BDPRINT=""
+ [ -n "$BUILDDIR" ] && BDPRINT=": $BUILDDIR"
+ echo "Unable to find conf/bblayers.conf"
+ echo "BitBake must be run from within your build directory$BDPRINT"
+ exit 1
+elif [ -z "$BUILDDIR" ] ; then
+ BUILDDIR="`pwd`"
+fi
+
+needtar="1"
+TARVERSION=`tar --version | head -n 1 | cut -d ' ' -f 4`
+float_test() {
+ echo | awk 'END { exit ( !( '"$1"')); }'
+}
+
+# Tar version 1.24 and onwards handle overwriting symlinks correctly
+# but earlier versions do not; this needs to work properly for sstate
+float_test "$TARVERSION > 1.23" && needtar="0"
+
+buildpseudo="1"
+if [ $needpseudo = "1" ]; then
+ if [ -e "$BUILDDIR/pseudodone" ]; then
+ PSEUDOBINDIR=`cat $BUILDDIR/pseudodone`
+ else
+ PSEUDOBINDIR=`bitbake -e | grep STAGING_BINDIR_NATIVE=\" | cut -d '=' -f2 | cut -d '"' -f2`
+ fi
+ if [ -e "$PSEUDOBINDIR/pseudo" ]; then
+ buildpseudo="0"
+ fi
+
+	# Verify that the pseudo recipes are older than the pseudodone file
+ PSEUDO_RECIPE="`dirname $0`/../meta/recipes-devtools/pseudo"
+ if [ $buildpseudo -eq 0 ] && [ ! -d "$PSEUDO_RECIPE" ]; then
+ echo "Unable to verify if pseudo-native is up to date..." >&2
+ elif [ $buildpseudo -eq 0 ]; then
+ PSEUDO_NEWER=`find $PSEUDO_RECIPE -type f -newer $BUILDDIR/pseudodone`
+ if [ -n "$PSEUDO_NEWER" ]; then
+ buildpseudo="2"
+ fi
+ fi
+ if [ $buildpseudo = "0" -a ! -e "$BUILDDIR/pseudodone" ] ; then
+ echo $PSEUDOBINDIR > $BUILDDIR/pseudodone
+ fi
+fi
+
+# If tar is already built, we don't want to do it again...
+if [ -e "$PSEUDOBINDIR/tar" -a "$needtar" = "1" ]; then
+ needtar="0"
+fi
+
+if [ $needpseudo = "0" ]; then
+ buildpseudo="0"
+fi
+
+# If pseudo-native is an argument, assume the user wants to build pseudo-native!
+if [ $needpseudo != "0" -a $buildpseudo -eq 0 ]; then
+ for opt in $@; do
+ if [ "$opt" = "pseudo-native" ]; then
+ buildpseudo="3"
+ break
+ fi
+ done
+fi
+
+OLDPATH=$PATH
+export PATH=`echo $PATH | sed s#[^:]*/scripts:##`
+if [ $buildpseudo -gt 0 ]; then
+ [ $buildpseudo -eq 1 ] && echo "Pseudo is not present but is required, building this first before the main build"
+ [ $buildpseudo -eq 2 ] && echo "Pseudo may be out of date, rebuilding pseudo before the main build"
+ [ $buildpseudo -eq 3 ] && echo "Building pseudo-native before main build"
+ export PSEUDO_BUILD=1
+ TARTARGET="tar-replacement-native"
+ if [ $needtar = "0" ]; then
+ TARTARGET=""
+ fi
+ # Pass through debug options
+ additionalopts=""
+ for opt in $@; do
+ for key in $PASSTHROUGH_OPTS; do
+ if [ $opt = $key ]
+ then
+ additionalopts="$additionalopts $opt"
+ break
+ fi
+ done
+ done
+ bitbake pseudo-native $TARTARGET $additionalopts -c populate_sysroot
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ exit 1
+ fi
+ PSEUDOBINDIR=`bitbake -e | grep STAGING_BINDIR_NATIVE=\" | cut -d '=' -f2 | cut -d '"' -f2`
+ ret=$?
+ if [ "$ret" != "0" ]; then
+ exit 1
+ fi
+ echo $PSEUDOBINDIR > $BUILDDIR/pseudodone
+ # This needs to exist in case pseudo has to log somewhere
+ mkdir -p $PSEUDOBINDIR/../../var/pseudo
+fi
+BITBAKE=`which bitbake`
+export PATH=$OLDPATH
+if [ $needpseudo = "1" ]; then
+ export PSEUDO_BUILD=2
+ PSEUDO_BINDIR=$PSEUDOBINDIR PSEUDO_LIBDIR=$PSEUDOBINDIR/../lib/pseudo/lib PSEUDO_PREFIX=$PSEUDOBINDIR/../../ PSEUDO_DISABLED=1 $PSEUDOBINDIR/pseudo $BITBAKE $@
+else
+ export PSEUDO_BUILD=0
+ $BITBAKE $@
+fi
+ret=$?
+exit $ret
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
new file mode 100755
index 000000000..f3855df0c
--- /dev/null
+++ b/scripts/bitbake-prserv-tool
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+help ()
+{
+ base=`basename $0`
+ echo -e "Usage: $base command"
+    echo "Available commands:"
+ echo -e "\texport <file>: export and lock down the AUTOPR values from the PR service into a file for release."
+ echo -e "\timport <file>: import the AUTOPR values from the exported file into the PR service."
+}
+
+clean_cache()
+{
+ s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
+ if [ "x${s}" != "x" ]; then
+ rm -rf ${s}
+ fi
+}
+
+do_export ()
+{
+ file=$1
+ [ "x${file}" == "x" ] && help && exit 1
+ rm -f ${file}
+
+ clean_cache
+ bitbake -R conf/prexport.conf -p
+ s=`bitbake -R conf/prexport.conf -e | grep ^PRSERV_DUMPFILE= | cut -f2 -d\"`
+ if [ "x${s}" != "x" ];
+ then
+ [ -e $s ] && mv -f $s $file && echo "Exporting to file $file succeeded!"
+ return 0
+ fi
+ echo "Exporting to file $file failed!"
+ return 1
+}
+
+do_import ()
+{
+ file=$1
+ [ "x${file}" == "x" ] && help && exit 1
+
+ clean_cache
+ bitbake -R conf/primport.conf -R $file -p
+ ret=$?
+ [ $ret -eq 0 ] && echo "Importing from file $file succeeded!" || echo "Importing from file $file failed!"
+ return $ret
+}
+
+[ $# -eq 0 ] && help && exit 1
+
+case $1 in
+export)
+ do_export $2
+ ;;
+import)
+ do_import $2
+ ;;
+*)
+ help
+ exit 1
+ ;;
+esac
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
new file mode 100755
index 000000000..9936a4b60
--- /dev/null
+++ b/scripts/buildhistory-diff
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2012 Intel Corporation
+# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+
+import sys
+import os
+
+# Ensure PythonGit is installed (buildhistory_analysis needs it)
+try:
+ import git
+except ImportError:
+ print("Please install PythonGit 0.3.1 or later in order to use this script")
+ sys.exit(1)
+
+
+def main():
+ if (len(sys.argv) < 3):
+ print("Report significant differences in the buildhistory repository")
+ print("Syntax: %s <buildhistory-path> <since-revision> [to-revision]" % os.path.basename(sys.argv[0]))
+ print("If to-revision is not specified, it defaults to HEAD")
+ sys.exit(1)
+
+ # Set path to OE lib dir so we can import the buildhistory_analysis module
+ basepath = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])) + '/..')
+ newpath = basepath + '/meta/lib'
+ # Set path to bitbake lib dir so the buildhistory_analysis module can load bb.utils
+ if os.path.exists(basepath + '/bitbake/lib/bb'):
+ bitbakepath = basepath + '/bitbake'
+ else:
+ # look for bitbake/bin dir in PATH
+ bitbakepath = None
+ for pth in os.environ['PATH'].split(':'):
+ if os.path.exists(os.path.join(pth, '../lib/bb')):
+ bitbakepath = os.path.abspath(os.path.join(pth, '..'))
+ break
+ if not bitbakepath:
+ print("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+
+ sys.path.extend([newpath, bitbakepath + '/lib'])
+ import oe.buildhistory_analysis
+
+ if len(sys.argv) > 3:
+ torev = sys.argv[3]
+ else:
+ torev = 'HEAD'
+ changes = oe.buildhistory_analysis.process_changes(sys.argv[1], sys.argv[2], torev)
+ for chg in changes:
+ print('%s' % chg)
+
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/cleanup-workdir b/scripts/cleanup-workdir
new file mode 100755
index 000000000..b77e8c664
--- /dev/null
+++ b/scripts/cleanup-workdir
@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import os
+import sys
+import optparse
+import re
+import commands
+import shutil
+
+versions = {}
+obsolete_dirs = []
+parser = None
+
+def err_quit(msg):
+ print msg
+ parser.print_usage()
+ sys.exit(1)
+
+def parse_version(verstr):
+ elems = verstr.split(':')
+ epoch = elems[0]
+ if len(epoch) == 0:
+ return elems[1]
+ else:
+ return epoch + '_' + elems[1]
+
+def parse_dir(match, pkgabsdir):
+ pkg_name = match.group(1)
+ pkg_version = match.group(2)
+ if pkg_name in versions:
+ if pkg_version != versions[pkg_name]:
+ obsolete_dirs.append(pkgabsdir)
+ return True
+ return False
+
+def main():
+ global parser
+ parser = optparse.OptionParser(
+ usage = """%prog
+
+Remove the obsolete packages' build directories in WORKDIR.
+This script must be run under BUILDDIR after source file \"oe-init-build-env\".""")
+
+ options, args = parser.parse_args(sys.argv)
+
+ builddir = commands.getoutput('echo $BUILDDIR')
+ if len(builddir) == 0:
+ err_quit("Please source file \"oe-init-build-env\" first.\n")
+
+ if os.getcwd() != builddir:
+ err_quit("Please run %s under: %s\n" % (os.path.basename(args[0]), builddir))
+
+ print 'Updating bitbake caches...'
+ cmd = "bitbake -s"
+ (ret, output) = commands.getstatusoutput(cmd)
+ if ret != 0:
+ print "Execute 'bitbake -s' failed. Can't get packages' versions."
+ return 1
+
+ output = output.split('\n')
+ index = 0
+ while len(output[index]) > 0:
+ index += 1
+ alllines = output[index+1:]
+
+ for line in alllines:
+ # empty again means end of the versions output
+ if len(line) == 0:
+ break
+ line = line.strip()
+ line = re.sub('\s+', ' ', line)
+ elems = line.split(' ')
+ if len(elems) == 2:
+ version = parse_version(elems[1])
+ else:
+ version = parse_version(elems[2])
+ versions[elems[0]] = version
+
+ cmd = "bitbake -e | grep ^TMPDIR"
+ (ret, output) = commands.getstatusoutput(cmd)
+ if ret != 0:
+        print "Execute 'bitbake -e' failed. Can't get TMPDIR."
+ return 1
+
+ tmpdir = output.split('"')[1]
+ workdir = os.path.join(tmpdir, 'work')
+ if not os.path.exists(workdir):
+ print "WORKDIR %s does NOT exist. Quit." % workdir
+ return 1
+
+ for archdir in os.listdir(workdir):
+ archdir = os.path.join(workdir, archdir)
+ if not os.path.isdir(archdir):
+ pass
+
+ for pkgdir in sorted(os.listdir(archdir)):
+ pkgabsdir = os.path.join(archdir, pkgdir)
+ if not os.path.isdir(pkgabsdir):
+ pass
+
+ # parse the package directory names
+ # parse native/nativesdk packages first
+ match = re.match('(.*?-native.*?)-(.*)', pkgdir)
+ if match and parse_dir(match, pkgabsdir):
+ continue
+
+ # parse package names which ends with numbers such as 'glib-2.0'
+ match = re.match('(.*?-[\.\d]+)-(\d.*)', pkgdir)
+ if match and parse_dir(match, pkgabsdir):
+ continue
+
+ # other packages
+ match = re.match('(.*?)-(\d.*)', pkgdir)
+ if match and parse_dir(match, pkgabsdir):
+ continue
+
+ for d in obsolete_dirs:
+        print "Deleting %s" % d
+ shutil.rmtree(d, True)
+
+ if len(obsolete_dirs):
+ print '\nTotal %d items.' % len(obsolete_dirs)
+ else:
+ print '\nNo obsolete directory found under %s.' % workdir
+
+ return 0
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 2
+ import traceback
+ traceback.print_exc(3)
+ sys.exit(ret)
diff --git a/scripts/combo-layer b/scripts/combo-layer
new file mode 100755
index 000000000..73d61cce4
--- /dev/null
+++ b/scripts/combo-layer
@@ -0,0 +1,422 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright 2011 Intel Corporation
+# Authored-by: Yu Ke <ke.yu@intel.com>
+# Paul Eggleton <paul.eggleton@intel.com>
+# Richard Purdie <richard.purdie@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+import os, sys
+import optparse
+import logging
+import subprocess
+import ConfigParser
+
+__version__ = "0.2.1"
+
+def logger_create():
+ logger = logging.getLogger("")
+ loggerhandler = logging.StreamHandler()
+ loggerhandler.setFormatter(logging.Formatter("[%(asctime)s] %(message)s","%H:%M:%S"))
+ logger.addHandler(loggerhandler)
+ logger.setLevel(logging.INFO)
+ return logger
+
+logger = logger_create()
+
+class Configuration(object):
+ """
+ Manages the configuration
+
+ For an example config file, see combo-layer.conf.example
+
+ """
+ def __init__(self, options):
+ for key, val in options.__dict__.items():
+ setattr(self, key, val)
+ self.parser = ConfigParser.ConfigParser()
+ self.parser.readfp(open(self.conffile))
+ self.repos = {}
+ for repo in self.parser.sections():
+ self.repos[repo] = {}
+ for (name, value) in self.parser.items(repo):
+ if value.startswith("@"):
+ self.repos[repo][name] = eval(value.strip("@"))
+ else:
+ self.repos[repo][name] = value
+
+ def update(self, repo, option, value):
+ self.parser.set(repo, option, value)
+ self.parser.write(open(self.conffile, "w"))
+
+ def sanity_check(self):
+ required_options=["src_uri", "local_repo_dir", "dest_dir", "last_revision"]
+ msg = ""
+ for name in self.repos:
+ for option in required_options:
+ if option not in self.repos[name]:
+ msg = "%s\nOption %s is not defined for component %s" %(msg, option, name)
+ if msg != "":
+ logger.error("configuration file %s has the following error: %s" % (self.conffile,msg))
+ sys.exit(1)
+
+ # filterdiff is required by action_splitpatch, so check its availability
+ if subprocess.call("which filterdiff &>/dev/null", shell=True) != 0:
+ logger.error("ERROR: patchutils package is missing, please install it (e.g. # apt-get install patchutils)")
+ sys.exit(1)
+
+def runcmd(cmd,destdir=None,printerr=True):
+ """
+ execute command, raise CalledProcessError if fail
+ return output if succeed
+ """
+ logger.debug("run cmd '%s' in %s" % (cmd, os.getcwd() if destdir is None else destdir))
+ out = os.tmpfile()
+ try:
+ subprocess.check_call(cmd, stdout=out, stderr=out, cwd=destdir, shell=True)
+ except subprocess.CalledProcessError,e:
+ out.seek(0)
+ if printerr:
+ logger.error("%s" % out.read())
+ raise e
+
+ out.seek(0)
+ output = out.read()
+ logger.debug("output: %s" % output )
+ return output
+
+def action_init(conf, args):
+ """
+ Clone component repositories
+ Check git is initialised; if not, copy initial data from component repos
+ """
+ for name in conf.repos:
+ ldir = conf.repos[name]['local_repo_dir']
+ if not os.path.exists(ldir):
+ logger.info("cloning %s to %s" %(conf.repos[name]['src_uri'], ldir))
+ subprocess.check_call("git clone %s %s" % (conf.repos[name]['src_uri'], ldir), shell=True)
+ branch = conf.repos[name].get('branch', "master")
+ runcmd("git checkout %s" % branch, ldir)
+ if not os.path.exists(".git"):
+ runcmd("git init")
+ for name in conf.repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ logger.info("copying data from %s..." % name)
+ dest_dir = repo['dest_dir']
+ if dest_dir and dest_dir != ".":
+ extract_dir = os.path.join(os.getcwd(), dest_dir)
+ os.makedirs(extract_dir)
+ else:
+ extract_dir = os.getcwd()
+ branch = repo.get('branch', "master")
+ file_filter = repo.get('file_filter', "")
+ runcmd("git archive %s | tar -x -C %s %s" % (branch, extract_dir, file_filter), ldir)
+ lastrev = runcmd("git rev-parse HEAD", ldir).strip()
+ conf.update(name, "last_revision", lastrev)
+ runcmd("git add .")
+ logger.info("Initial combo layer repository data has been created; please make any changes if desired and then use 'git commit' to make the initial commit.")
+ else:
+ logger.info("Repository already initialised, nothing to do.")
+
+
+def check_repo_clean(repodir):
+ """
+ check if the repo is clean
+ exit if repo is dirty
+ """
+ output=runcmd("git status --porcelain", repodir)
+ if output:
+ logger.error("git repo %s is dirty, please fix it first", repodir)
+ sys.exit(1)
+
+def check_patch(patchfile):
+ f = open(patchfile)
+ ln = f.readline()
+ of = None
+ in_patch = False
+ beyond_msg = False
+ pre_buf = ''
+ while ln:
+ if not beyond_msg:
+ if ln == '---\n':
+ if not of:
+ break
+ in_patch = False
+ beyond_msg = True
+ elif ln.startswith('--- '):
+ # We have a diff in the commit message
+ in_patch = True
+ if not of:
+ print('WARNING: %s contains a diff in its commit message, indenting to avoid failure during apply' % patchfile)
+ of = open(patchfile + '.tmp', 'w')
+ of.write(pre_buf)
+ pre_buf = ''
+ elif in_patch and not ln[0] in '+-@ \n\r':
+ in_patch = False
+ if of:
+ if in_patch:
+ of.write(' ' + ln)
+ else:
+ of.write(ln)
+ else:
+ pre_buf += ln
+ ln = f.readline()
+ f.close()
+ if of:
+ of.close()
+ os.rename(patchfile + '.tmp', patchfile)
+
+def action_update(conf, args):
+ """
+ update the component repos
+ generate the patch list
+ apply the generated patches
+ """
+ repos = []
+ if len(args) > 1:
+ for arg in args[1:]:
+ if arg.startswith('-'):
+ break
+ else:
+ repos.append(arg)
+ for repo in repos:
+ if not repo in conf.repos:
+ logger.error("Specified component '%s' not found in configuration" % repo)
+ sys.exit(0)
+
+ if not repos:
+ repos = conf.repos
+
+ # make sure all repos are clean
+ for name in repos:
+ check_repo_clean(conf.repos[name]['local_repo_dir'])
+ check_repo_clean(os.getcwd())
+
+ import uuid
+ patch_dir = "patch-%s" % uuid.uuid4()
+ os.mkdir(patch_dir)
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ dest_dir = repo['dest_dir']
+ branch = repo.get('branch', "master")
+ repo_patch_dir = os.path.join(os.getcwd(), patch_dir, name)
+
+ # Step 1: update the component repo
+ runcmd("git checkout %s" % branch, ldir)
+ logger.info("git pull for component repo %s in %s ..." % (name, ldir))
+ output=runcmd("git pull", ldir)
+ logger.info(output)
+
+ # Step 2: generate the patch list and store to patch dir
+ logger.info("generating patches for %s" % name)
+ if dest_dir != ".":
+ prefix = "--src-prefix=a/%s/ --dst-prefix=b/%s/" % (dest_dir, dest_dir)
+ else:
+ prefix = ""
+ if repo['last_revision'] == "":
+ logger.info("Warning: last_revision of component %s is not set, starting from the first commit" % name)
+ patch_cmd_range = "--root %s" % branch
+ rev_cmd_range = branch
+ else:
+ patch_cmd_range = "%s..%s" % (repo['last_revision'], branch)
+ rev_cmd_range = patch_cmd_range
+
+ file_filter = repo.get('file_filter',"")
+
+ patch_cmd = "git format-patch -N %s --output-directory %s %s -- %s" % \
+ (prefix,repo_patch_dir, patch_cmd_range, file_filter)
+ output = runcmd(patch_cmd, ldir)
+ logger.debug("generated patch set:\n%s" % output)
+ patchlist = output.splitlines()
+
+ rev_cmd = 'git rev-list --no-merges ' + rev_cmd_range
+ revlist = runcmd(rev_cmd, ldir).splitlines()
+
+ # Step 3: Call repo specific hook to adjust patch
+ if 'hook' in repo:
+ # hook parameter is: ./hook patchpath revision reponame
+ count=len(revlist)-1
+ for patch in patchlist:
+ runcmd("%s %s %s %s" % (repo['hook'], patch, revlist[count], name))
+ count=count-1
+
+ # Step 4: write patch list and revision list to file, for user to edit later
+ patchlist_file = os.path.join(os.getcwd(), patch_dir, "patchlist-%s" % name)
+ repo['patchlist'] = patchlist_file
+ f = open(patchlist_file, 'w')
+ count=len(revlist)-1
+ for patch in patchlist:
+ f.write("%s %s\n" % (patch, revlist[count]))
+ check_patch(os.path.join(patch_dir, patch))
+ count=count-1
+ f.close()
+
+ # Step 5: invoke bash for user to edit patch and patch list
+ if conf.interactive:
+ print 'Edit the patch and patch list in %s\n' \
+ 'For example, remove the unwanted patch entry from patchlist-*, so that it will be not applied later\n' \
+ 'When you are finished, run the following to continue:\n' \
+ ' exit 0 -- exit and continue to apply the patch\n' \
+ ' exit 1 -- abort and do not apply the patch\n' % patch_dir
+ ret = subprocess.call(["bash"], cwd=patch_dir)
+ if ret != 0:
+ print "Aborting without applying the patch"
+ sys.exit(0)
+
+ # Step 6: apply the generated and revised patch
+ apply_patchlist(conf, repos)
+ runcmd("rm -rf %s" % patch_dir)
+
+ # Step 7: commit the updated config file if it's being tracked
+ relpath = os.path.relpath(conf.conffile)
+ try:
+ output = runcmd("git status --porcelain %s" % relpath, printerr=False)
+ except:
+ # Outside the repository
+ output = None
+ if output:
+ logger.info("Committing updated configuration file")
+ if output.lstrip().startswith("M"):
+ runcmd('git commit -m "Automatic commit to update last_revision" %s' % relpath)
+
+def apply_patchlist(conf, repos):
+ """
+ apply the generated patch list to combo repo
+ """
+ for name in repos:
+ repo = conf.repos[name]
+ lastrev = repo["last_revision"]
+ for line in open(repo['patchlist']):
+ patchfile = line.split()[0]
+ lastrev = line.split()[1]
+ if os.path.getsize(patchfile) == 0:
+ logger.info("(skipping %s - no changes)", lastrev)
+ else:
+ cmd = "git am --keep-cr -s -p1 %s" % patchfile
+ logger.info("Apply %s" % patchfile )
+ try:
+ runcmd(cmd)
+ except subprocess.CalledProcessError:
+ logger.info('running "git am --abort" to cleanup repo')
+ runcmd("git am --abort")
+ logger.error('"%s" failed' % cmd)
+ logger.info("please manually apply patch %s" % patchfile)
+ logger.info("After applying, run this tool again to apply the remaining patches")
+ conf.update(name, "last_revision", lastrev)
+ sys.exit(0)
+ if lastrev != repo['last_revision']:
+ conf.update(name, "last_revision", lastrev)
+
+def action_splitpatch(conf, args):
+ """
+ generate the commit patch and
+ split the patch per repo
+ """
+ logger.debug("action_splitpatch")
+ if len(args) > 1:
+ commit = args[1]
+ else:
+ commit = "HEAD"
+ patchdir = "splitpatch-%s" % commit
+ if not os.path.exists(patchdir):
+ os.mkdir(patchdir)
+
+ # filerange_root is for the repo whose dest_dir is root "."
+ # and it should be specified by excluding all other repo dest dir
+ # like "-x repo1 -x repo2 -x repo3 ..."
+ filerange_root = ""
+ for name in conf.repos:
+ dest_dir = conf.repos[name]['dest_dir']
+ if dest_dir != ".":
+ filerange_root = '%s -x "%s/*"' % (filerange_root, dest_dir)
+
+ for name in conf.repos:
+ dest_dir = conf.repos[name]['dest_dir']
+ patch_filename = "%s/%s.patch" % (patchdir, name)
+ if dest_dir == ".":
+ cmd = "git format-patch -n1 --stdout %s^..%s | filterdiff -p1 %s > %s" % (commit, commit, filerange_root, patch_filename)
+ else:
+ cmd = "git format-patch --no-prefix -n1 --stdout %s^..%s -- %s > %s" % (commit, commit, dest_dir, patch_filename)
+ runcmd(cmd)
+ # Detect empty patches (including those produced by filterdiff above
+ # that contain only preamble text)
+ if os.path.getsize(patch_filename) == 0 or runcmd("filterdiff %s" % patch_filename) == "":
+ os.remove(patch_filename)
+ logger.info("(skipping %s - no changes)", name)
+ else:
+ logger.info(patch_filename)
+
+def action_error(conf, args):
+ logger.info("invalid action %s" % args[0])
+
+actions = {
+ "init": action_init,
+ "update": action_update,
+ "splitpatch": action_splitpatch,
+}
+
+def main():
+ parser = optparse.OptionParser(
+ version = "Combo Layer Repo Tool version %s" % __version__,
+ usage = """%prog [options] action
+
+Create and update a combination layer repository from multiple component repositories.
+
+Action:
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+
+ parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
+ action = "store", dest = "conffile", default = "conf/combo-layer.conf")
+
+ parser.add_option("-i", "--interactive", help = "interactive mode, user can edit the patch list and patches",
+ action = "store_true", dest = "interactive", default = False)
+
+ parser.add_option("-D", "--debug", help = "output debug information",
+ action = "store_true", dest = "debug", default = False)
+
+ options, args = parser.parse_args(sys.argv)
+
+ # Dispatch to action handler
+ if len(args) == 1:
+ logger.error("No action specified, exiting")
+ parser.print_help()
+ elif args[1] not in actions:
+ logger.error("Unsupported action %s, exiting\n" % (args[1]))
+ parser.print_help()
+ elif not os.path.exists(options.conffile):
+ logger.error("No valid config file, exiting\n")
+ parser.print_help()
+ else:
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+ confdata = Configuration(options)
+ confdata.sanity_check()
+ actions.get(args[1], action_error)(confdata, args[1:])
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc(5)
+ sys.exit(ret)
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh
new file mode 100755
index 000000000..8b148aca0
--- /dev/null
+++ b/scripts/combo-layer-hook-default.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Hook to add source component/revision info to commit message
+# Parameter:
+# $1 patch-file
+# $2 revision
+# $3 reponame
+
+patchfile=$1
+rev=$2
+reponame=$3
+
+sed -i -e "s#^Subject: \[PATCH\] \(.*\)#Subject: \[PATCH\] $reponame: \1#" $patchfile
+sed -i -e "0,/^Signed-off-by:/s#\(^Signed-off-by:.*\)#\($reponame rev: $rev\)\n\n\1#" $patchfile
diff --git a/scripts/combo-layer.conf.example b/scripts/combo-layer.conf.example
new file mode 100644
index 000000000..010a69235
--- /dev/null
+++ b/scripts/combo-layer.conf.example
@@ -0,0 +1,56 @@
+# combo-layer example configuration file
+
+# component name
+[bitbake]
+# mandatory options
+# git upstream uri
+src_uri = git://git.openembedded.org/bitbake
+
+# the directory to clone the component repo
+local_repo_dir = /home/kyu3/src/test/bitbake
+
+# the relative dir within the combo repo to put the component files
+# use "." if the files should be in the root dir
+dest_dir = bitbake
+
+# the last update revision.
+# "init" will set this to the latest revision automatically, however if it
+# is empty when "update" is run, the tool will start from the first commit.
+# Note that this value will get updated by "update" if the component repo's
+# latest revision changed and the operation completes successfully.
+last_revision =
+
+# optional options:
+
+# branch: specify the branch in the component repo to pull from
+# (master if not specified)
+
+# file_filter: only include the specified file(s)
+# file_filter = [path] [path] ...
+# example:
+# file_filter = src/ : only include the subdir src
+# file_filter = src/*.c : only include the src *.c file
+# file_filter = src/main.c src/Makefile.am : only include these two files
+
+# hook: if provided, the tool will call the hook to process the generated
+# patch from upstream, and then apply the modified patch to the combo
+# repo.
+# the hook script is called as follows: ./hook patchpath revision reponame
+# example:
+# hook = combo-layer-hook-default.sh
+
+[oe-core]
+src_uri = git://git.openembedded.org/openembedded-core
+local_repo_dir = /home/kyu3/src/test/oecore
+dest_dir = .
+last_revision =
+
+# It is also possible to embed python code in the config values. Similar
+# to bitbake it considers every value starting with @ to be a python
+# script.
+# e.g. local_repo_dir could easily be configured using an environment
+# variable:
+#
+# [bitbake]
+# local_repo_dir = @os.getenv("LOCAL_REPO_DIR") + "/bitbake"
+#
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
new file mode 100755
index 000000000..62aa66d96
--- /dev/null
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
+# the header by skipping the first line, but error messages and bad data records
+# need to be removed first. It will generate three views of the plot, and leave
+# an interactive view open for further analysis.
+#
+# AUTHORS
+# Darren Hart <dvhart@linux.intel.com>
+#
+
+# Setup the defaults
+DATFILE="bb-matrix.dat"
+XLABEL="BB_NUMBER_THREADS"
+YLABEL="PARALLEL_MAKE"
+FIELD=3
+DEF_TITLE="Elapsed Time (seconds)"
+PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
+SIZE="640,480"
+
+function usage {
+CMD=$(basename $0)
+cat <<EOM
+Usage: $CMD [-d datfile] [-f field] [-h] [-t title] [-w]
+ -d datfile The data file generated by bb-matrix.sh (default: $DATFILE)
+ -f field The field index to plot as the Z axis from the data file
+ (default: $FIELD, "$DEF_TITLE")
+ -h Display this help message
+ -s W,H PNG and window size in pixels (default: $SIZE)
+ -t title The title to display, should describe the field (-f) and units
+ (default: "$DEF_TITLE")
+ -w Render the plot as wireframe with a 2D colormap projected on the
+ XY plane rather than as the texture for the surface
+EOM
+}
+
+# Parse and validate arguments
+while getopts "d:f:hs:t:w" OPT; do
+ case $OPT in
+ d)
+ DATFILE="$OPTARG"
+ ;;
+ f)
+ FIELD="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ s)
+ SIZE="$OPTARG"
+ ;;
+ t)
+ TITLE="$OPTARG"
+ ;;
+ w)
+ PM3D_FRAGMENT="set pm3d at b"
+ W="-w"
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+# Ensure the data file exists
+if [ ! -f "$DATFILE" ]; then
+ echo "ERROR: $DATFILE does not exist"
+ usage
+ exit 1
+fi
+PLOT_BASENAME=${DATFILE%.*}-f$FIELD$W
+
+# Set a sane title
+# TODO: parse the header and define titles for each format parameter for TIME(1)
+if [ -z "$TITLE" ]; then
+ if [ ! "$FIELD" == "3" ]; then
+ TITLE="Field $FIELD"
+ else
+ TITLE="$DEF_TITLE"
+ fi
+fi
+
+# Determine the dgrid3d mesh dimensions size
+MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sort | uniq | head -n1)
+MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 1 | sort | uniq | tail -n1)
+BB_CNT=$[${MAX#*0} - $MIN + 1]
+MIN=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sort | uniq | head -n1)
+MAX=$(tail -n +2 "$DATFILE" | cut -d ' ' -f 2 | sort | uniq | tail -n1)
+PM_CNT=$[${MAX#*0} - $MIN + 1]
+
+
+(cat <<EOF
+set title "$TITLE"
+set xlabel "$XLABEL"
+set ylabel "$YLABEL"
+set style line 100 lt 5 lw 1.5
+$PM3D_FRAGMENT
+set dgrid3d $PM_CNT,$BB_CNT
+set ticslevel 0.2
+
+set term png size $SIZE
+set output "$PLOT_BASENAME.png"
+splot "$DATFILE" every ::1 using 1:2:$FIELD with lines ls 100
+
+set view 90,0
+set output "$PLOT_BASENAME-bb.png"
+replot
+
+set view 90,90
+set output "$PLOT_BASENAME-pm.png"
+replot
+
+set view 60,30
+set term wxt size $SIZE
+replot
+EOF
+) | gnuplot --persist
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh
new file mode 100755
index 000000000..b9edd5ff0
--- /dev/null
+++ b/scripts/contrib/bb-perf/bb-matrix.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+# This script runs BB_CMD (typically building core-image-sato) for all
+# combinations of BB_RANGE and PM_RANGE values. It saves off all the console
+# logs, the buildstats directories, and creates a bb-pm-runtime.dat file which
+# can be used to postprocess the results with a plotting tool, spreadsheet, etc.
+# Before running this script, it is recommended that you pre-download all the
+# necessary sources by performing the BB_CMD once manually. It is also a good
+# idea to disable cron to avoid runtime variations caused by things like the
+# locate process. Be sure to sanitize the dat file prior to post-processing as
+# it may contain error messages or bad runs that should be removed.
+#
+# AUTHORS
+# Darren Hart <dvhart@linux.intel.com>
+#
+
+# The following ranges are appropriate for a 4 core system with 8 logical units
+BB_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"
+PM_RANGE="04 05 06 07 08 09 10 11 12 13 14 15 16"
+
+DATADIR="bb-matrix-$$"
+BB_CMD="bitbake core-image-minimal"
+RUNTIME_LOG="$DATADIR/bb-matrix.dat"
+
+# See TIME(1) for a description of the time format parameters
+# The following all report 0: W K r s t w
+TIME_STR="%e %S %U %P %c %w %R %F %M %x"
+
+# Prepare the DATADIR
+mkdir $DATADIR
+if [ $? -ne 0 ]; then
+ echo "Failed to create $DATADIR."
+ exit 1
+fi
+
+# Add a simple header
+echo "BB PM $TIME_STR" > $RUNTIME_LOG
+for BB in $BB_RANGE; do
+ for PM in $PM_RANGE; do
+ RUNDIR="$DATADIR/$BB-$PM-build"
+ mkdir $RUNDIR
+ BB_LOG=$RUNDIR/$BB-$PM-bitbake.log
+ date
+ echo "BB=$BB PM=$PM Logging to $BB_LOG"
+
+ # Export the variables under test and run the bitbake command
+ export BB_NUMBER_THREADS=$(echo $BB | sed 's/^0*//')
+ export PARALLEL_MAKE="-j $(echo $PM | sed 's/^0*//')"
+ /usr/bin/time -f "$BB $PM $TIME_STR" -a -o $RUNTIME_LOG $BB_CMD &> $BB_LOG
+
+ echo " $(tail -n1 $RUNTIME_LOG)"
+ echo -n " Cleaning up..."
+ mv tmp/buildstats $RUNDIR/$BB-$PM-buildstats
+ rm -f pseudodone &> /dev/null
+ rm -rf tmp &> /dev/null
+ rm -rf sstate-cache &> /dev/null
+ rm -rf tmp-eglibc &> /dev/null
+ echo "done"
+ done
+done
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
new file mode 100755
index 000000000..0896d6444
--- /dev/null
+++ b/scripts/contrib/bbvars.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
+
+
+import sys
+import getopt
+import os
+import os.path
+import re
+
+def usage():
+ print 'Usage: %s -d FILENAME [-d FILENAME]* -m METADIR [-m MATADIR]*' % os.path.basename(sys.argv[0])
+ print ' -d FILENAME documentation file to search'
+ print ' -h, --help display this help and exit'
+ print ' -m METADIR meta directory to search for recipes'
+ print ' -t FILENAME documentation config file (for doc tags)'
+ print ' -T Only display variables with doc tags (requires -t)'
+
+def recipe_bbvars(recipe):
+ ''' Return a unique set of every bbvar encountered in the recipe '''
+ prog = re.compile("[A-Z_]+")
+ vset = set()
+ try:
+ r = open(recipe)
+ except IOError as (errno, strerror):
+ print 'WARNING: Failed to open recipe ', recipe
+ print strerror
+
+ for line in r:
+ # Strip any comments from the line
+ line = line.rsplit('#')[0]
+ vset = vset.union(set(prog.findall(line)))
+ r.close()
+
+ bbvars = {}
+ for v in vset:
+ bbvars[v] = 1
+
+ return bbvars
+
+def collect_bbvars(metadir):
+ ''' Walk the metadir and collect the bbvars from each recipe found '''
+ bbvars = {}
+ for root,dirs,files in os.walk(metadir):
+ for name in files:
+ if name.find(".bb") >= 0:
+ for key in recipe_bbvars(os.path.join(root,name)).iterkeys():
+ if bbvars.has_key(key):
+ bbvars[key] = bbvars[key] + 1
+ else:
+ bbvars[key] = 1
+ return bbvars
+
+def bbvar_is_documented(var, docfiles):
+ prog = re.compile(".*($|[^A-Z_])%s([^A-Z_]|$)" % (var))
+ for doc in docfiles:
+ try:
+ f = open(doc)
+ except IOError as (errno, strerror):
+ print 'WARNING: Failed to open doc ', doc
+ print strerror
+ for line in f:
+ if prog.match(line):
+ return True
+ f.close()
+ return False
+
+def bbvar_doctag(var, docconf):
+ prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ if docconf == "":
+ return "?"
+
+ try:
+ f = open(docconf)
+ except IOError as (errno, strerror):
+ return strerror
+
+ for line in f:
+ m = prog.search(line)
+ if m:
+ return m.group(1)
+
+ f.close()
+ return ""
+
+def main():
+ docfiles = []
+ metadirs = []
+ bbvars = {}
+ undocumented = []
+ docconf = ""
+ onlydoctags = False
+
+ # Collect and validate input
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "d:hm:t:T", ["help"])
+ except getopt.GetoptError, err:
+ print '%s' % str(err)
+ usage()
+ sys.exit(2)
+
+ for o, a in opts:
+ if o in ('-h', '--help'):
+ usage()
+ sys.exit(0)
+ elif o == '-d':
+ if os.path.isfile(a):
+ docfiles.append(a)
+ else:
+ print 'ERROR: documentation file %s is not a regular file' % (a)
+ sys.exit(3)
+ elif o == '-m':
+ if os.path.isdir(a):
+ metadirs.append(a)
+ else:
+ print 'ERROR: meta directory %s is not a directory' % (a)
+ sys.exit(4)
+ elif o == "-t":
+ if os.path.isfile(a):
+ docconf = a
+ elif o == "-T":
+ onlydoctags = True
+ else:
+ assert False, "unhandled option"
+
+ if len(docfiles) == 0:
+ print 'ERROR: no docfile specified'
+ usage()
+ sys.exit(5)
+
+ if len(metadirs) == 0:
+ print 'ERROR: no metadir specified'
+ usage()
+ sys.exit(6)
+
+ if onlydoctags and docconf == "":
+ print 'ERROR: no docconf specified'
+ usage()
+ sys.exit(7)
+
+ # Collect all the variable names from the recipes in the metadirs
+ for m in metadirs:
+ for key,cnt in collect_bbvars(m).iteritems():
+ if bbvars.has_key(key):
+ bbvars[key] = bbvars[key] + cnt
+ else:
+ bbvars[key] = cnt
+
+ # Check each var for documentation
+ varlen = 0
+ for v in bbvars.iterkeys():
+ if len(v) > varlen:
+ varlen = len(v)
+ if not bbvar_is_documented(v, docfiles):
+ undocumented.append(v)
+ undocumented.sort()
+ varlen = varlen + 1
+
+ # Report all undocumented variables
+ print 'Found %d undocumented bb variables (out of %d):' % (len(undocumented), len(bbvars))
+ header = '%s%s%s' % (str("VARIABLE").ljust(varlen), str("COUNT").ljust(6), str("DOCTAG").ljust(7))
+ print header
+ print str("").ljust(len(header), '=')
+ for v in undocumented:
+ doctag = bbvar_doctag(v, docconf)
+ if not onlydoctags or not doctag == "":
+ print '%s%s%s' % (v.ljust(varlen), str(bbvars[v]).ljust(6), doctag)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage
new file mode 100755
index 000000000..2cba9b28f
--- /dev/null
+++ b/scripts/contrib/ddimage
@@ -0,0 +1,87 @@
+#!/bin/sh
+
+#BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/sdc /dev/sdd /dev/sde"
+BLACKLIST_DEVICES="/dev/sda"
+
+# 1MB blocksize
+BLOCKSIZE=1048576
+
+function usage() {
+ echo "Usage: $(basename $0) IMAGE DEVICE"
+}
+
+function image_details() {
+ IMG=$1
+ echo "Image details"
+ echo "============="
+ echo " image: $(stat --printf '%N\n' $IMG)"
+ echo " size: $(stat -L --printf '%s bytes\n' $IMG)"
+ echo " modified: $(stat -L --printf '%y\n' $IMG)"
+ echo " type: $(file -L -b $IMG)"
+ echo ""
+}
+
+function device_details() {
+ DEV=$1
+ BLOCK_SIZE=512
+
+ echo "Device details"
+ echo "=============="
+ echo " device: $DEVICE"
+ if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
+ echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
+ else
+ echo " vendor: UNKOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/device/model" ]; then
+ echo " model: $(cat /sys/class/block/$DEV/device/model)"
+ else
+ echo " model: UNKNOWN"
+ fi
+ if [ -f "/sys/class/block/$DEV/size" ]; then
+ echo " size: $[$(cat /sys/class/block/$DEV/size)*BLOCK_SIZE] bytes"
+ else
+ echo " size: UNKNOWN"
+ fi
+ echo ""
+}
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+IMAGE=$1
+DEVICE=$2
+
+if [ ! -e "$IMAGE" ]; then
+ echo "ERROR: Image $IMAGE does not exist"
+ usage
+ exit 1
+fi
+
+
+if [ "${BLACKLIST_DEVICES/${DEVICE}/ERROR}" != "$BLACKLIST_DEVICES" ]; then
+ echo "ERROR: Device $DEVICE is blacklisted"
+ exit 1
+fi
+
+if [ ! -w "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist or is not writable"
+ usage
+ exit 1
+fi
+
+image_details $IMAGE
+device_details $(basename $DEVICE)
+
+echo -n "Write $IMAGE to $DEVICE [y/N]? "
+read RESPONSE
+if [ "$RESPONSE" != "y" ]; then
+ echo "Write aborted"
+ exit 0
+fi
+
+echo "Writing image..."
+dd if="$IMAGE" of="$DEVICE" bs="$BLOCKSIZE"
+sync
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
new file mode 100755
index 000000000..5b66f0367
--- /dev/null
+++ b/scripts/contrib/documentation-audit.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Perform an audit of which packages provide documentation and which
+# are missing -doc packages.
+#
+# Setup requirements: be sure to be building for MACHINE=qemux86. Run
+# this script after source'ing the build environment script, so you're
+# running it from build/ directory.
+#
+# Maintainer: Scott Garman <scott.a.garman@intel.com>
+
+REPORT_DOC_SIMPLE="documentation_exists.txt"
+REPORT_DOC_DETAIL="documentation_exists_detail.txt"
+REPORT_MISSING_SIMPLE="documentation_missing.txt"
+REPORT_MISSING_DETAIL="documentation_missing_detail.txt"
+REPORT_BUILD_ERRORS="build_errors.txt"
+
+rm -rf $REPORT_DOC_SIMPLE $REPORT_DOC_DETAIL $REPORT_MISSING_SIMPLE $REPORT_MISSING_DETAIL
+
+BITBAKE=`which bitbake`
+if [ -z "$BITBAKE" ]; then
+ echo "Error: bitbake command not found."
+ echo "Did you forget to source the build environment script?"
+ exit 1
+fi
+
+echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
+echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
+echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+
+for pkg in `bitbake -s | awk '{ print \$1 }'`; do
+ if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
+ "$pkg" == "Parsing" || "$pkg" == "Package" ||
+ "$pkg" == "NOTE:" || "$pkg" == "WARNING:" ||
+ "$pkg" == "done." || "$pkg" == "============" ]]
+ then
+ # Skip initial bitbake output
+ continue
+ fi
+ if [[ "$pkg" =~ -native$ || "$pkg" =~ -nativesdk$ ||
+ "$pkg" =~ -cross-canadian ]]; then
+ # Skip native/nativesdk/cross-canadian recipes
+ continue
+ fi
+ if [[ "$pkg" =~ ^meta- || "$pkg" =~ ^task- || "$pkg" =~ -image ]]; then
+ # Skip meta, task and image recipes
+ continue
+ fi
+ if [[ "$pkg" =~ ^glibc- || "$pkg" =~ ^libiconv$ ||
+ "$pkg" =~ -toolchain$ || "$pkg" =~ ^package-index$ ||
+ "$pkg" =~ ^linux- || "$pkg" =~ ^adt-installer$ ||
+ "$pkg" =~ ^eds-tools$ || "$pkg" =~ ^external-python-tarball$ ||
+ "$pkg" =~ ^qt4-embedded$ || "$pkg" =~ ^qt-mobility ]]; then
+ # Skip glibc, libiconv, -toolchain, and other recipes known
+ # to cause build conflicts or trigger false positives.
+ continue
+ fi
+
+ echo "Building package $pkg..."
+ bitbake $pkg > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "There was an error building package $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_BUILD_ERRORS
+
+ # Do not skip the remaining tests, as sometimes the
+ # exit status is 1 due to QA errors, and we can still
+ # perform the -doc checks.
+ fi
+
+ echo "$pkg built successfully, checking for a documentation package..."
+ WORKDIR=`bitbake -e $pkg | grep ^WORKDIR | awk -F '=' '{ print \$2 }' | awk -F '"' '{ print \$2 }'`
+ FIND_DOC_PKG=`find $WORKDIR/packages-split/*-doc -maxdepth 0 -type d`
+ if [ -z "$FIND_DOC_PKG" ]; then
+ # No -doc package was generated:
+ echo "No -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_MISSING_SIMPLE
+ continue
+ fi
+
+ FIND_DOC_FILES=`find $FIND_DOC_PKG -type f`
+ if [ -z "$FIND_DOC_FILES" ]; then
+ # No files shipped with the -doc package:
+ echo "No files shipped with the -doc package: $pkg" >> "$REPORT_MISSING_DETAIL"
+ echo "$pkg" >> $REPORT_MISSING_SIMPLE
+ continue
+ fi
+
+ echo "Documentation shipped with $pkg:" >> "$REPORT_DOC_DETAIL"
+ echo "$FIND_DOC_FILES" >> "$REPORT_DOC_DETAIL"
+ echo "" >> "$REPORT_DOC_DETAIL"
+
+ echo "$pkg" >> "$REPORT_DOC_SIMPLE"
+done
diff --git a/scripts/contrib/python/generate-manifest-2.7.py b/scripts/contrib/python/generate-manifest-2.7.py
new file mode 100755
index 000000000..7b43137bd
--- /dev/null
+++ b/scripts/contrib/python/generate-manifest-2.7.py
@@ -0,0 +1,385 @@
+#!/usr/bin/env python
+
+# generate Python Manifest for the OpenEmbedded build system
+# (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
+# (C) 2007 Jeremy Laine
+# licensed under MIT, see COPYING.MIT
+#
+# June 22, 2011 -- Mark Hatle <mark.hatle@windriver.com>
+# * Updated to no longer generate special -dbg package, instead use the
+# single system -dbg
+# * Update version with ".1" to indicate this change
+
+import os
+import sys
+import time
+
+VERSION = "2.7.2"
+
+__author__ = "Michael 'Mickey' Lauer <mlauer@vanille-media.de>"
+__version__ = "20110222.2"
+
+class MakefileMaker:
+
+ def __init__( self, outfile ):
+ """initialize"""
+ self.packages = {}
+ self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
+ self.output = outfile
+ self.out( """
+# WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
+# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
+# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
+""" % ( sys.argv[0], __version__ ) )
+
+ #
+ # helper functions
+ #
+
+ def out( self, data ):
+ """print a line to the output file"""
+ self.output.write( "%s\n" % data )
+
+ def setPrefix( self, targetPrefix ):
+ """set a file prefix for addPackage files"""
+ self.targetPrefix = targetPrefix
+
+ def doProlog( self ):
+ self.out( """ """ )
+ self.out( "" )
+
+ def addPackage( self, name, description, dependencies, filenames ):
+ """add a package to the Makefile"""
+ if type( filenames ) == type( "" ):
+ filenames = filenames.split()
+ fullFilenames = []
+ for filename in filenames:
+ if filename[0] != "$":
+ fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
+ else:
+ fullFilenames.append( filename )
+ self.packages[name] = description, dependencies, fullFilenames
+
+ def doBody( self ):
+ """generate body of Makefile"""
+
+ global VERSION
+
+ #
+ # generate provides line
+ #
+
+ provideLine = 'PROVIDES+="'
+ for name in sorted(self.packages):
+ provideLine += "%s " % name
+ provideLine += '"'
+
+ self.out( provideLine )
+ self.out( "" )
+
+ #
+ # generate package line
+ #
+
+ packageLine = 'PACKAGES="${PN}-dbg '
+ for name in sorted(self.packages):
+ if name.startswith("${PN}-distutils"):
+ if name == "${PN}-distutils":
+ packageLine += "%s-staticdev %s " % (name, name)
+ elif name != '${PN}-dbg':
+ packageLine += "%s " % name
+ packageLine += '${PN}-modules"'
+
+ self.out( packageLine )
+ self.out( "" )
+
+ #
+ # generate package variables
+ #
+
+ for name, data in sorted(self.packages.iteritems()):
+ desc, deps, files = data
+
+ #
+ # write out the description, revision and dependencies
+ #
+ self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
+ self.out( 'RDEPENDS_%s="%s"' % ( name, deps ) )
+
+ line = 'FILES_%s="' % name
+
+ #
+ # check which directories to make in the temporary directory
+ #
+
+ dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
+ for target in files:
+ dirset[os.path.dirname( target )] = True
+
+ #
+ # generate which files to copy for the target (-dfR because whole directories are also allowed)
+ #
+
+ for target in files:
+ line += "%s " % target
+
+ line += '"'
+ self.out( line )
+ self.out( "" )
+
+ self.out( 'DESCRIPTION_${PN}-modules="All Python modules"' )
+ line = 'RDEPENDS_${PN}-modules="'
+
+ for name, data in sorted(self.packages.iteritems()):
+ if name not in ['${PN}-dev', '${PN}-distutils-staticdev']:
+ line += "%s " % name
+
+ self.out( "%s \"" % line )
+ self.out( 'ALLOW_EMPTY_${PN}-modules = "1"' )
+
+ def doEpilog( self ):
+ self.out( """""" )
+ self.out( "" )
+
+ def make( self ):
+ self.doProlog()
+ self.doBody()
+ self.doEpilog()
+
+if __name__ == "__main__":
+
+ if len( sys.argv ) > 1:
+ os.popen( "rm -f ./%s" % sys.argv[1] )
+ outfile = file( sys.argv[1], "w" )
+ else:
+ outfile = sys.stdout
+
+ m = MakefileMaker( outfile )
+
+    # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
+    # Parameters: name, description, dependencies, filenames
+    #
+
+ m.addPackage( "${PN}-core", "Python Interpreter and core modules (needed!)", "${PN}-lang ${PN}-re",
+ "__future__.* _abcoll.* abc.* copy.* copy_reg.* ConfigParser.* " +
+ "genericpath.* getopt.* linecache.* new.* " +
+ "os.* posixpath.* struct.* " +
+ "warnings.* site.* stat.* " +
+ "UserDict.* UserList.* UserString.* " +
+ "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
+ "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* " +
+ "_weakrefset.* sysconfig.* config/Makefile " +
+ "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
+ "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
+
+ m.addPackage( "${PN}-dev", "Python Development Package", "${PN}-core",
+ "${includedir} " +
+ "${libdir}/lib*${SOLIBSDEV} " +
+ "${libdir}/*.la " +
+ "${libdir}/*.a " +
+ "${libdir}/*.o " +
+ "${libdir}/pkgconfig " +
+ "${base_libdir}/*.a " +
+ "${base_libdir}/*.o " +
+ "${datadir}/aclocal " +
+ "${datadir}/pkgconfig " )
+
+ m.addPackage( "${PN}-2to3", "Python Automated Python 2 to 3 code translation", "${PN}-core",
+ "${bindir}/2to3 lib2to3" ) # package
+
+ m.addPackage( "${PN}-idle", "Python Integrated Development Environment", "${PN}-core ${PN}-tkinter",
+ "${bindir}/idle idlelib" ) # package
+
+ m.addPackage( "${PN}-pydoc", "Python Interactive Help Support", "${PN}-core ${PN}-lang ${PN}-stringold ${PN}-re",
+ "${bindir}/pydoc pydoc.* pydoc_data" )
+
+ m.addPackage( "${PN}-smtpd", "Python Simple Mail Transport Daemon", "${PN}-core ${PN}-netserver ${PN}-email ${PN}-mime",
+ "${bindir}/smtpd.* smtpd.*" )
+
+ m.addPackage( "${PN}-audio", "Python Audio Handling", "${PN}-core",
+ "wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so audiodev.* sunaudio.* sunau.* toaiff.*" )
+
+ m.addPackage( "${PN}-bsddb", "Python Berkeley Database Bindings", "${PN}-core",
+ "bsddb lib-dynload/_bsddb.so" ) # package
+
+ m.addPackage( "${PN}-codecs", "Python Codecs, Encodings & i18n Support", "${PN}-core ${PN}-lang",
+ "codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/_codecs* lib-dynload/_multibytecodec.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
+
+ m.addPackage( "${PN}-compile", "Python Bytecode Compilation Support", "${PN}-core",
+ "py_compile.* compileall.*" )
+
+ m.addPackage( "${PN}-compiler", "Python Compiler Support", "${PN}-core",
+ "compiler" ) # package
+
+ m.addPackage( "${PN}-compression", "Python High Level Compression Support", "${PN}-core ${PN}-zlib",
+ "gzip.* zipfile.* tarfile.* lib-dynload/bz2.so" )
+
+ m.addPackage( "${PN}-crypt", "Python Basic Cryptographic and Hashing Support", "${PN}-core",
+ "hashlib.* md5.* sha.* lib-dynload/crypt.so lib-dynload/_hashlib.so lib-dynload/_sha256.so lib-dynload/_sha512.so" )
+
+ m.addPackage( "${PN}-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-stringold",
+ "lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
+
+ m.addPackage( "${PN}-curses", "Python Curses Support", "${PN}-core",
+ "curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # directory + low level module
+
+ m.addPackage( "${PN}-ctypes", "Python C Types Support", "${PN}-core",
+ "ctypes lib-dynload/_ctypes.so lib-dynload/_ctypes_test.so" ) # directory + low level module
+
+ m.addPackage( "${PN}-datetime", "Python Calendar and Time support", "${PN}-core ${PN}-codecs",
+ "_strptime.* calendar.* lib-dynload/datetime.so" )
+
+ m.addPackage( "${PN}-db", "Python File-Based Database Support", "${PN}-core",
+ "anydbm.* dumbdbm.* whichdb.* " )
+
+ m.addPackage( "${PN}-debugger", "Python Debugger", "${PN}-core ${PN}-io ${PN}-lang ${PN}-re ${PN}-stringold ${PN}-shell ${PN}-pprint",
+ "bdb.* pdb.*" )
+
+ m.addPackage( "${PN}-difflib", "Python helpers for computing deltas between objects.", "${PN}-lang ${PN}-re",
+ "difflib.*" )
+
+ m.addPackage( "${PN}-distutils-staticdev", "Python Distribution Utilities (Static Libraries)", "${PN}-distutils",
+ "config/lib*.a" ) # package
+
+ m.addPackage( "${PN}-distutils", "Python Distribution Utilities", "${PN}-core",
+ "config distutils" ) # package
+
+ m.addPackage( "${PN}-doctest", "Python framework for running examples in docstrings.", "${PN}-core ${PN}-lang ${PN}-io ${PN}-re ${PN}-unittest ${PN}-debugger ${PN}-difflib",
+ "doctest.*" )
+
+ # FIXME consider adding to some higher level package
+ m.addPackage( "${PN}-elementtree", "Python elementree", "${PN}-core",
+ "lib-dynload/_elementtree.so" )
+
+ m.addPackage( "${PN}-email", "Python Email Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-mime ${PN}-audio ${PN}-image ${PN}-netclient",
+ "imaplib.* email" ) # package
+
+ m.addPackage( "${PN}-fcntl", "Python's fcntl Interface", "${PN}-core",
+ "lib-dynload/fcntl.so" )
+
+ m.addPackage( "${PN}-hotshot", "Python Hotshot Profiler", "${PN}-core",
+ "hotshot lib-dynload/_hotshot.so" )
+
+ m.addPackage( "${PN}-html", "Python HTML Processing", "${PN}-core",
+ "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
+
+ m.addPackage( "${PN}-gdbm", "Python GNU Database Support", "${PN}-core",
+ "lib-dynload/gdbm.so" )
+
+ m.addPackage( "${PN}-image", "Python Graphical Image Handling", "${PN}-core",
+ "colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
+
+ m.addPackage( "${PN}-io", "Python Low-Level I/O", "${PN}-core ${PN}-math",
+ "lib-dynload/_socket.so lib-dynload/_io.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so " +
+ "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
+
+ m.addPackage( "${PN}-json", "Python JSON Support", "${PN}-core ${PN}-math ${PN}-re",
+ "json lib-dynload/_json.so" ) # package
+
+ m.addPackage( "${PN}-lang", "Python Low-Level Language Support", "${PN}-core",
+ "lib-dynload/_bisect.so lib-dynload/_collections.so lib-dynload/_heapq.so lib-dynload/_weakref.so lib-dynload/_functools.so " +
+ "lib-dynload/array.so lib-dynload/itertools.so lib-dynload/operator.so lib-dynload/parser.so " +
+ "atexit.* bisect.* code.* codeop.* collections.* dis.* functools.* heapq.* inspect.* keyword.* opcode.* symbol.* repr.* token.* " +
+ "tokenize.* traceback.* weakref.*" )
+
+ m.addPackage( "${PN}-logging", "Python Logging Support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-stringold",
+ "logging" ) # package
+
+ m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
+ "mailbox.*" )
+
+ m.addPackage( "${PN}-math", "Python Math Support", "${PN}-core",
+ "lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
+
+ m.addPackage( "${PN}-mime", "Python MIME Handling APIs", "${PN}-core ${PN}-io",
+ "mimetools.* uu.* quopri.* rfc822.* MimeWriter.*" )
+
+ m.addPackage( "${PN}-mmap", "Python Memory-Mapped-File Support", "${PN}-core ${PN}-io",
+ "lib-dynload/mmap.so " )
+
+ m.addPackage( "${PN}-multiprocessing", "Python Multiprocessing Support", "${PN}-core ${PN}-io ${PN}-lang",
+ "lib-dynload/_multiprocessing.so multiprocessing" ) # package
+
+ m.addPackage( "${PN}-netclient", "Python Internet Protocol Clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
+ "*Cookie*.* " +
+ "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.* uuid.* rfc822.* mimetools.*" )
+
+ m.addPackage( "${PN}-netserver", "Python Internet Protocol Servers", "${PN}-core ${PN}-netclient",
+ "cgi.* *HTTPServer.* SocketServer.*" )
+
+ m.addPackage( "${PN}-numbers", "Python Number APIs", "${PN}-core ${PN}-lang ${PN}-re",
+ "decimal.* numbers.*" )
+
+ m.addPackage( "${PN}-pickle", "Python Persistence Support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
+ "pickle.* shelve.* lib-dynload/cPickle.so pickletools.*" )
+
+ m.addPackage( "${PN}-pkgutil", "Python Package Extension Utility Support", "${PN}-core",
+ "pkgutil.*")
+
+ m.addPackage( "${PN}-pprint", "Python Pretty-Print Support", "${PN}-core",
+ "pprint.*" )
+
+ m.addPackage( "${PN}-profile", "Python Basic Profiling Support", "${PN}-core ${PN}-textutils",
+ "profile.* pstats.* cProfile.* lib-dynload/_lsprof.so" )
+
+ m.addPackage( "${PN}-re", "Python Regular Expression APIs", "${PN}-core",
+ "re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
+
+ m.addPackage( "${PN}-readline", "Python Readline Support", "${PN}-core",
+ "lib-dynload/readline.so rlcompleter.*" )
+
+ m.addPackage( "${PN}-resource", "Python Resource Control Interface", "${PN}-core",
+ "lib-dynload/resource.so" )
+
+ m.addPackage( "${PN}-shell", "Python Shell-Like Functionality", "${PN}-core ${PN}-re",
+ "cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shlex.* shutil.*" )
+
+ m.addPackage( "${PN}-robotparser", "Python robots.txt parser", "${PN}-core ${PN}-netclient",
+ "robotparser.*")
+
+ m.addPackage( "${PN}-subprocess", "Python Subprocess Support", "${PN}-core ${PN}-io ${PN}-re ${PN}-fcntl ${PN}-pickle",
+ "subprocess.*" )
+
+ m.addPackage( "${PN}-sqlite3", "Python Sqlite3 Database Support", "${PN}-core ${PN}-datetime ${PN}-lang ${PN}-crypt ${PN}-io ${PN}-threading ${PN}-zlib",
+ "lib-dynload/_sqlite3.so sqlite3/dbapi2.* sqlite3/__init__.* sqlite3/dump.*" )
+
+ m.addPackage( "${PN}-sqlite3-tests", "Python Sqlite3 Database Support Tests", "${PN}-core ${PN}-sqlite3",
+ "sqlite3/test" )
+
+ m.addPackage( "${PN}-stringold", "Python String APIs [deprecated]", "${PN}-core ${PN}-re",
+ "lib-dynload/strop.so string.* stringold.*" )
+
+ m.addPackage( "${PN}-syslog", "Python Syslog Interface", "${PN}-core",
+ "lib-dynload/syslog.so" )
+
+ m.addPackage( "${PN}-terminal", "Python Terminal Controlling Support", "${PN}-core ${PN}-io",
+ "pty.* tty.*" )
+
+ m.addPackage( "${PN}-tests", "Python Tests", "${PN}-core",
+ "test" ) # package
+
+ m.addPackage( "${PN}-threading", "Python Threading & Synchronization Support", "${PN}-core ${PN}-lang",
+ "_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
+
+ m.addPackage( "${PN}-tkinter", "Python Tcl/Tk Bindings", "${PN}-core",
+ "lib-dynload/_tkinter.so lib-tk" ) # package
+
+ m.addPackage( "${PN}-unittest", "Python Unit Testing Framework", "${PN}-core ${PN}-stringold ${PN}-lang",
+ "unittest/" )
+
+ m.addPackage( "${PN}-unixadmin", "Python Unix Administration Support", "${PN}-core",
+ "lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
+
+ m.addPackage( "${PN}-xml", "Python basic XML support.", "${PN}-core ${PN}-elementtree ${PN}-re",
+ "lib-dynload/pyexpat.so xml xmllib.*" ) # package
+
+ m.addPackage( "${PN}-xmlrpc", "Python XMLRPC Support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
+ "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.*" )
+
+ m.addPackage( "${PN}-zlib", "Python zlib Support.", "${PN}-core",
+ "lib-dynload/zlib.so" )
+
+ m.addPackage( "${PN}-mailbox", "Python Mailbox Format Support", "${PN}-core ${PN}-mime",
+ "mailbox.*" )
+
+ m.make()
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh
new file mode 100755
index 000000000..9e5725ae5
--- /dev/null
+++ b/scripts/contrib/test_build_time.sh
@@ -0,0 +1,237 @@
+#!/bin/bash
+
+# Build performance regression test script
+#
+# Copyright 2011 Intel Corporation
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# DESCRIPTION
+# This script is intended to be used in conjunction with "git bisect run"
+# in order to find regressions in build time, however it can also be used
+# independently. It cleans out the build output directories, runs a
+# specified worker script (an example is test_build_time_worker.sh) under
+# TIME(1), logs the results to TEST_LOGDIR (default /tmp) and returns a
+# value telling "git bisect run" whether the build time is good (under
+# the specified threshold) or bad (over it). There is also a tolerance
+# option but it is not particularly useful as it only subtracts the
+# tolerance from the given threshold and uses it as the actual threshold.
+#
+# It is also capable of taking a file listing git revision hashes to be
+# test-applied to the repository in order to get past build failures that
+# would otherwise cause certain revisions to have to be skipped; if a
+# revision does not apply cleanly then the script assumes it does not
+# need to be applied and ignores it.
+#
+# Please see the help output (syntax below) for some important setup
+# instructions.
+#
+# AUTHORS
+# Paul Eggleton <paul.eggleton@linux.intel.com>
+
+
+# syntax: print command-line help to stdout, including the one-time
+# environment setup (TEST_BUILDDIR) and the sudoers entry the user is
+# expected to have in place.  Does not exit; callers decide the status.
+syntax() {
+	echo "syntax: $0 <script> <time> <tolerance> [patchrevlist]"
+	echo ""
+	echo "  script  - worker script file (if in current dir, prefix with ./)"
+	echo "  time  - time threshold (in seconds, suffix m for minutes)"
+	echo "  tolerance  - tolerance (in seconds, suffix m for minutes or % for"
+	echo "               percentage, can be 0)"
+	echo "  patchrevlist - optional file listing revisions to apply as patches on top"
+	echo ""
+	echo "You must set TEST_BUILDDIR to point to a previously created build directory,"
+	echo "however please note that this script will wipe out the TMPDIR defined in"
+	echo "TEST_BUILDDIR/conf/local.conf as part of its initial setup (as well as your"
+	echo "~/.ccache)"
+	echo ""
+	echo "To get rid of the sudo prompt, please add the following line to /etc/sudoers"
+	echo "(use 'visudo' to edit this; also it is assumed that the user you are running"
+	echo "as is a member of the 'wheel' group):"
+	echo ""
+	echo "%wheel ALL=(ALL) NOPASSWD: /sbin/sysctl -w vm.drop_caches=[1-3]"
+	echo ""
+	echo "Note: it is recommended that you disable crond and any other process that"
+	echo "may cause significant CPU or I/O usage during build performance tests."
+}
+
+# Note - we exit with 250 here because that will tell git bisect run that
+# something bad happened and stop
+if [ "$1" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if [ "$2" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if [ "$3" = "" ] ; then
+ syntax
+ exit 250
+fi
+
+if ! [[ "$2" =~ ^[0-9][0-9m.]*$ ]] ; then
+ echo "'$2' is not a valid number for threshold"
+ exit 250
+fi
+
+if ! [[ "$3" =~ ^[0-9][0-9m.%]*$ ]] ; then
+ echo "'$3' is not a valid number for tolerance"
+ exit 250
+fi
+
+if [ "$TEST_BUILDDIR" = "" ] ; then
+ echo "Please set TEST_BUILDDIR to a previously created build directory"
+ exit 250
+fi
+
+if [ ! -d "$TEST_BUILDDIR" ] ; then
+ echo "TEST_BUILDDIR $TEST_BUILDDIR not found"
+ exit 250
+fi
+
+git diff --quiet
+if [ $? != 0 ] ; then
+ echo "Working tree is dirty, cannot proceed"
+ exit 251
+fi
+
+if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
+ echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
+fi
+
+runscript=$1
+timethreshold=$2
+tolerance=$3
+
+if [ "$4" != "" ] ; then
+ patchrevlist=`cat $4`
+else
+ patchrevlist=""
+fi
+
+if [[ timethreshold == *m* ]] ; then
+ timethreshold=`echo $timethreshold | sed s/m/*60/ | bc`
+fi
+
+if [[ $tolerance == *m* ]] ; then
+ tolerance=`echo $tolerance | sed s/m/*60/ | bc`
+elif [[ $tolerance == *%* ]] ; then
+ tolerance=`echo $tolerance | sed s/%//`
+ tolerance=`echo "scale = 2; (($tolerance * $timethreshold) / 100)" | bc`
+fi
+
+tmpdir=`grep "^TMPDIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/TMPDIR[ \t]*=[ \t\?]*"//' -e 's/"//'`
+if [ "x$tmpdir" = "x" ]; then
+ echo "Unable to determine TMPDIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
+ exit 250
+fi
+sstatedir=`grep "^SSTATE_DIR" $TEST_BUILDDIR/conf/local.conf | sed -e 's/SSTATE_DIR[ \t\?]*=[ \t]*"//' -e 's/"//'`
+if [ "x$sstatedir" = "x" ]; then
+ echo "Unable to determine SSTATE_DIR from $TEST_BUILDDIR/conf/local.conf, bailing out"
+ exit 250
+fi
+
+if [ `expr length $tmpdir` -lt 4 ] ; then
+ echo "TMPDIR $tmpdir is less than 4 characters, bailing out"
+ exit 250
+fi
+
+if [ `expr length $sstatedir` -lt 4 ] ; then
+ echo "SSTATE_DIR $sstatedir is less than 4 characters, bailing out"
+ exit 250
+fi
+
+echo -n "About to wipe out TMPDIR $tmpdir, press Ctrl+C to break out... "
+for i in 9 8 7 6 5 4 3 2 1
+do
+ echo -ne "\x08$i"
+ sleep 1
+done
+echo
+
+pushd . > /dev/null
+
+rm -f pseudodone
+echo "Removing TMPDIR $tmpdir..."
+rm -rf $tmpdir
+echo "Removing TMPDIR $tmpdir-*libc..."
+rm -rf $tmpdir-*libc
+echo "Removing SSTATE_DIR $sstatedir..."
+rm -rf $sstatedir
+echo "Removing ~/.ccache..."
+rm -rf ~/.ccache
+
+echo "Syncing..."
+sync
+sync
+echo "Dropping VM cache..."
+#echo 3 > /proc/sys/vm/drop_caches
+sudo /sbin/sysctl -w vm.drop_caches=3 > /dev/null
+
+if [ "$TEST_LOGDIR" = "" ] ; then
+ logdir="/tmp"
+else
+ logdir="$TEST_LOGDIR"
+fi
+rev=`git rev-parse HEAD`
+logfile="$logdir/timelog_$rev.log"
+echo -n > $logfile
+
+gitroot=`git rev-parse --show-toplevel`
+cd $gitroot
+for patchrev in $patchrevlist ; do
+ echo "Applying $patchrev"
+ patchfile=`mktemp`
+ git show $patchrev > $patchfile
+ git apply --check $patchfile &> /dev/null
+ if [ $? != 0 ] ; then
+ echo " ... patch does not apply without errors, ignoring"
+ else
+ echo "Applied $patchrev" >> $logfile
+ git apply $patchfile &> /dev/null
+ fi
+ rm $patchfile
+done
+
+sync
+echo "Quiescing for 5s..."
+sleep 5
+
+echo "Running $runscript at $rev..."
+timeoutfile=`mktemp`
+/usr/bin/time -o $timeoutfile -f "%e\nreal\t%E\nuser\t%Us\nsys\t%Ss\nmaxm\t%Mk" $runscript 2>&1 | tee -a $logfile
+exitstatus=$PIPESTATUS
+
+git reset --hard HEAD > /dev/null
+popd > /dev/null
+
+timeresult=`head -n1 $timeoutfile`
+cat $timeoutfile | tee -a $logfile
+rm $timeoutfile
+
+if [ $exitstatus != 0 ] ; then
+ # Build failed, exit with 125 to tell git bisect run to skip this rev
+ echo "*** Build failed (exit code $exitstatus), skipping..." | tee -a $logfile
+ exit 125
+fi
+
+ret=`echo "scale = 2; $timeresult > $timethreshold - $tolerance" | bc`
+echo "Returning $ret" | tee -a $logfile
+exit $ret
+
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh
new file mode 100755
index 000000000..8e20a9ea7
--- /dev/null
+++ b/scripts/contrib/test_build_time_worker.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# This is an example script to be used in conjunction with test_build_time.sh
+
+# TEST_BUILDDIR must name an existing build directory; only its basename
+# is used, and it must be a subdirectory of the current directory.
+if [ "$TEST_BUILDDIR" = "" ] ; then
+	echo "TEST_BUILDDIR is not set"
+	exit 1
+fi
+
+buildsubdir=`basename $TEST_BUILDDIR`
+if [ ! -d $buildsubdir ] ; then
+	echo "Unable to find build subdir $buildsubdir in current directory"
+	exit 1
+fi
+
+# Source whichever environment setup script this tree provides
+# (newer "oe-" naming vs. the older Poky naming).
+if [ -f oe-init-build-env ] ; then
+	. ./oe-init-build-env $buildsubdir
+elif [ -f poky-init-build-env ] ; then
+	. ./poky-init-build-env $buildsubdir
+else
+	echo "Unable to find build environment setup script"
+	exit 1
+fi
+
+# Pick the image target name matching the tree layout: new-style
+# core-image-sato if its recipe exists, otherwise the legacy name.
+if [ -f ../meta/recipes-sato/images/core-image-sato.bb ] ; then
+	target="core-image-sato"
+else
+	target="poky-image-sato"
+fi
+
+echo "Build started at `date "+%Y-%m-%d %H:%M:%S"`"
+echo "bitbake $target"
+bitbake $target
+ret=$?
+echo "Build finished at `date "+%Y-%m-%d %H:%M:%S"`"
+# Propagate bitbake's exit status to the caller (test_build_time.sh
+# uses it to decide whether to skip this revision).
+exit $ret
+
diff --git a/scripts/create-lsb-image b/scripts/create-lsb-image
new file mode 100755
index 000000000..48b96e241
--- /dev/null
+++ b/scripts/create-lsb-image
@@ -0,0 +1,228 @@
+#!/bin/bash
+#
+# Copyright (C) 2010-2011 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+red='\E[31;40m'
+green='\E[32;40m'
+
+#Get current owner
+OWNER=`whoami`
+#Get group
+GROUP=`id -gn ${USER}`
+MACHINE_ARCH=`bitbake -e | sed -n 's/^MACHINE_ARCH=\"\(.*\)\"/\1/p'`
+DEPLOY_DIR_IMAGE=`bitbake -e | sed -n 's/^DEPLOY_DIR_IMAGE=\"\(.*\)\"/\1/p'`
+#Get the values of the MACHINE_ARCH and DEPLOY_DIR_IMAGE variables
+LSB_IMAGE=poky-image-lsb-${MACHINE_ARCH}-test.ext3
+
+# ECHO: print all arguments in green, then reset terminal attributes.
+ECHO()
+{
+	echo -e "${green}$@"
+	tput sgr0
+}
+
+# ERROR: print all arguments in red, reset terminal attributes and
+# abort the script with exit status 1.
+ERROR()
+{
+	echo -e "${red}$@"
+	tput sgr0
+	exit 1
+}
+
+exit_check()
+{
+ [ $? -ne 0 ] && exit $?
+}
+
+# usage: print invocation help and exit with an error status.
+usage()
+{
+	ECHO "Usage: PC\$ create-lsb-image ARCH ROOTFS_IMAGE"
+	ECHO "  ARCH: x86 or x86-64 or ppc32"
+	ECHO "  ROOTFS_IMAGE: \
+Name of the rootfs image with suffix \"tar.bz2\""
+
+
+	ECHO ""
+	ECHO "Examples:"
+	# Fixed typo: the example previously said "creat-lsb-image", which
+	# is not the name of this script.
+	ECHO "  PC\$ create-lsb-image \
+x86 poky-image-lsb-qemux86-20110317030443.rootfs.tar.bz2"
+	exit 1
+}
+
+#There should be two parameters to get machine type and name of image
+if [ $# -ne 2 ]; then
+ usage
+fi
+
+#Get list for lsb test suite
+case ${1} in
+"x86")
+ T_ARCH=ia32
+ P_ARCH=i486
+ COM_PACKAGE_LIST="lsb-dist-testkit-4.1.0-5.${T_ARCH}.tar.gz"
+ ;;
+"x86-64")
+ T_ARCH=amd64
+ P_ARCH=x86_64
+ MACHINE_ARCH=${MACHINE_ARCH/x86_64/x86-64}
+ COM_PACKAGE_LIST="lsb-dist-testkit-4.1.0-5.${P_ARCH}.tar.gz"
+ ;;
+"ppc32")
+ P_ARCH=ppc
+ T_ARCH=${1}
+ COM_PACKAGE_LIST="lsb-dist-testkit-4.1.0-5.${T_ARCH}.tar.gz"
+ ;;
+*)
+ usage
+ ;;
+esac
+
+APP_PACKAGE_RPMLIST="lsb-apache-2.2.14-3.lsb4.${P_ARCH}.rpm \
+ lsb-tcl-8.5.7-6.lsb4.${P_ARCH}.rpm \
+ lsb-expect-5.43.0-11.lsb4.${P_ARCH}.rpm \
+ lsb-groff-1.20.1-5.lsb4.${P_ARCH}.rpm \
+ lsb-raptor-1.4.19-3.lsb4.${P_ARCH}.rpm \
+ lsb-xpdf-1.01-10.lsb4.${P_ARCH}.rpm \
+ lsb-samba-3.4.3-5.lsb4.${P_ARCH}.rpm \
+ lsb-rsync-3.0.6-3.lsb4.${P_ARCH}.rpm"
+
+APP_PACKAGE_SOURCELIST="expect-tests.tar \
+ tcl-tests.tar \
+ raptor-tests.tar \
+ test1.pdf \
+ test2.pdf"
+
+PACKAGE_LIST="${COM_PACKAGE_LIST} \
+ ${APP_PACKAGE_RPMLIST} \
+ ${APP_PACKAGE_SOURCELIST}"
+
+#Version for lsb test suite
+RELEASE=released-4.1.0
+#Tools of download packages
+WGET="wget -c -t 5"
+SERVER1="\
+http://ftp.linuxfoundation.org/pub/lsb/bundles/${RELEASE}/dist-testkit"
+SERVER2="\
+http://ftp.linux-foundation.org/pub/lsb/app-battery/${RELEASE}/${T_ARCH}"
+SERVER3="http://ftp.linuxfoundation.org/pub/lsb/snapshots/appbat/tests"
+
+
+#Function for downloading package from URL pointed
+# download: fetch each named file with wget (resumable, 5 retries),
+# choosing the mirror by file suffix: *.gz dist-testkit tarballs come
+# from SERVER1, *.rpm app-battery packages from SERVER2, and everything
+# else (test tars / PDFs) from SERVER3.
+download()
+{
+
+	for i in $@; do
+		ECHO "  -->Downloading package \"${i}\""
+		PACKAGE_NAME=${i}
+		# Suffix = everything after the last dot.
+		suffix=${PACKAGE_NAME##*.}
+		if [ "$suffix" = "gz" ];then
+			${WGET} ${SERVER1}/${i}
+		elif [ "$suffix" = "rpm" ];then
+			${WGET} ${SERVER2}/${i}
+		else
+			${WGET} ${SERVER3}/${i}
+		fi
+	done
+}
+
+#Check lsb image
+[ ! -d $DEPLOY_DIR_IMAGE ] && ERROR "\
+Image directory does not exist: ${DEPLOY_DIR_IMAGE}"
+
+ECHO "Entering directory $DEPLOY_DIR_IMAGE"
+cd $DEPLOY_DIR_IMAGE
+
+if [ ! -f ${2} ]; then
+ ECHO "rootfs image \"${2}\" not found in ${DEPLOY_DIR_IMAGE}"
+ ECHO "Please copy \"${2}\" to \"${DEPLOY_DIR_IMAGE}\""
+ exit 1
+fi
+
+#Create the lsbtmp mount point if it does not already exist
+[ ! -d lsbtmp ] && mkdir lsbtmp
+
+#Download lsb test suite
+mkdir -p lsb-test-suite-${MACHINE_ARCH} || \
+ERROR "Couldn't find lsb test suite for ${MACHINE_ARCH}"
+cd lsb-test-suite-${MACHINE_ARCH}
+ECHO "Downloading lsb test suite, it would take some time..."
+download ${PACKAGE_LIST}
+
+cd ..
+
+#Create lsb image
+if [ -f ${LSB_IMAGE} ];then
+ sudo umount lsbtmp > /dev/null 2>&1
+ ECHO "Removing old lsb image..."
+ /bin/rm ${LSB_IMAGE} > /dev/null 2>&1
+fi
+
+ECHO "Creating a 8GB file for the lsb image"
+dd if=/dev/zero of=${LSB_IMAGE} bs=1M count=8000 > /dev/null 2>&1
+exit_check
+
+ECHO "Formatting ext3 image..."
+mkfs.ext3 -q -F ${LSB_IMAGE} > /dev/null 2>&1
+tune2fs -j ${LSB_IMAGE} > /dev/null 2>&1
+
+
+ECHO "Generating final image"
+[ ! -d lsbtmp ] && mkdir lsbtmp
+
+
+#Install file system and lsb test suite to lsb image
+sudo mount -o loop ${LSB_IMAGE} lsbtmp
+exit_check
+
+ECHO " ->Installing rootfs..."
+sudo tar jpxf ${2} -C lsbtmp
+exit_check
+
+ECHO " ->Installing lsb test suite..."
+cd lsb-test-suite-${MACHINE_ARCH}
+if [ "${1}" = "x86-64" ]; then
+ sudo tar zpxf lsb-dist-testkit-4.1.0-5.${P_ARCH}.tar.gz -C ../lsbtmp
+else
+ sudo tar zpxf lsb-dist-testkit-4.1.0-5.${T_ARCH}.tar.gz -C ../lsbtmp
+fi
+exit_check
+
+sudo mkdir ../lsbtmp/lsb-Application
+sudo cp *.rpm *.tar *.pdf ../lsbtmp/lsb-Application
+exit_check
+cd ..
+
+# If a kernel modules tarball was deployed for this machine, unpack it
+# into the mounted image as well.
+if [ -f modules-*-${MACHINE_ARCH}.tgz ];then
+	# Fixed typo ("moudles" -> "modules") and indented the message to
+	# match the rest of the block.
+	ECHO "  ->Installing modules of driver..."
+	sudo tar zpxf modules-*-${MACHINE_ARCH}.tgz -C lsbtmp/
+fi
+
+
+#Unmount lsbtmp
+sudo umount lsbtmp
+exit_check
+sudo rm -rf lsbtmp
+
+#Change file attribute
+sudo chown ${OWNER}:${GROUP} ${LSB_IMAGE}
+exit_check
+sudo chmod 755 ${LSB_IMAGE}
+exit_check
+
+#Set up link
+ln -sf ${LSB_IMAGE} poky-image-lsb-${MACHINE_ARCH}.ext3
+
+ECHO "The LSB test environment has been setup successfully."
+ECHO "Please run this image on platform ${MACHINE_ARCH}"
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
new file mode 100755
index 000000000..9a8913db7
--- /dev/null
+++ b/scripts/create-pull-request
@@ -0,0 +1,231 @@
+#!/bin/bash
+#
+# Copyright (c) 2010-2011, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+#
+# This script is intended to be used to prepare a series of patches
+# and a cover letter in an appropriate and consistent format for
+# submission to Open Embedded and The Yocto Project, as well as to
+# related projects and layers.
+#
+
+ODIR=pull-$$
+RELATIVE_TO="master"
+COMMIT_ID="HEAD"
+PREFIX="PATCH"
+RFC=0
+
+# usage: print invocation help.  Callers choose the exit status.
+usage() {
+CMD=$(basename $0)
+cat <<EOM
+Usage: $CMD [-h] [-c] [-o output_dir] [-m msg_body_file] [-p prefix] [-s subject] [-r relative_to] [-i commit_id] -u remote [-b branch]
+  -b branch  Branch name in the specified remote (default: current branch)
+  -c  Create an RFC (Request for Comment) patch series
+  -h  Display this help message
+  -i commit_id  Ending commit (default: HEAD)
+  -m msg_body_file  The file containing a blurb to be inserted into the summary email
+  -o output_dir  Specify the output directory for the messages (default: pull-PID)
+  -p prefix  Use [prefix N/M] instead of [PATCH N/M] as the subject prefix
+  -r relative_to  Starting commit (default: master)
+  -s subject  The subject to be inserted into the summary email
+  -u remote  The git remote where the branch is located
+
+ Examples:
+   $CMD -u contrib -b nitin/basic
+   $CMD -u contrib -r distro/master -i nitin/distro -b nitin/distro
+   $CMD -u contrib -r master -i misc -b nitin/misc -o pull-misc
+   $CMD -u contrib -p "RFC PATCH" -b nitin/experimental
+EOM
+}
+
+# Parse and validate arguments
+while getopts "b:chi:m:o:p:r:s:u:" OPT; do
+ case $OPT in
+ b)
+ BRANCH="$OPTARG"
+ ;;
+ c)
+ RFC=1
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ i)
+ COMMIT_ID="$OPTARG"
+ ;;
+ m)
+ BODY="$OPTARG"
+ if [ ! -e "$BODY" ]; then
+ echo "ERROR: Body file does not exist"
+ exit 1
+ fi
+ ;;
+ o)
+ ODIR="$OPTARG"
+ ;;
+ p)
+ PREFIX="$OPTARG"
+ ;;
+ r)
+ RELATIVE_TO="$OPTARG"
+ ;;
+ s)
+ SUBJECT="$OPTARG"
+ ;;
+ u)
+ REMOTE="$OPTARG"
+ REMOTE_URL=$(git config remote.$REMOTE.url)
+ if [ $? -ne 0 ]; then
+ echo "ERROR: git config failed to find a url for '$REMOTE'"
+ echo
+ echo "To add a remote url for $REMOTE, use:"
+ echo " git config remote.$REMOTE.url <url>"
+ exit 1
+ fi
+
+ # Rewrite private URLs to public URLs
+ # Determine the repository name for use in the WEB_URL later
+ case "$REMOTE_URL" in
+ *@*)
+ USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+ PROTO_RE="[a-z][a-z+]*://"
+ GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
+ REMOTE_URL=${REMOTE_URL%.git}
+ REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
+ REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
+ ;;
+ *)
+ echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
+ echo " The pull and browse URLs will likely be incorrect"
+ ;;
+ esac
+ ;;
+ esac
+done
+
+if [ -z "$BRANCH" ]; then
+ BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
+ echo "NOTE: Assuming remote branch '$BRANCH', use -b to override."
+fi
+
+if [ -z "$REMOTE_URL" ]; then
+ usage
+ exit 1
+fi
+
+if [ $RFC -eq 1 ]; then
+ PREFIX="RFC $PREFIX"
+fi
+
+
+# Set WEB_URL from known remotes
+WEB_URL=""
+case "$REMOTE_URL" in
+ *git.yoctoproject.org*)
+ WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *git.pokylinux.org*)
+ WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *git.openembedded.org*)
+ WEB_URL="http://cgit.openembedded.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ ;;
+ *github.com*)
+ WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
+ ;;
+esac
+
+# Perform a sanity test on the web URL. Issue a warning if it is not
+# accessible, but do not abort as users may want to run offline.
+if [ -n "$WEB_URL" ]; then
+ wget --no-check-certificate -q $WEB_URL -O /dev/null
+ if [ $? -ne 0 ]; then
+ echo "WARNING: Branch '$BRANCH' was not found on the contrib git tree."
+ echo " Please check your remote and branch parameter before sending."
+ echo ""
+ fi
+fi
+
+if [ -e $ODIR ]; then
+ echo "ERROR: output directory $ODIR exists."
+ exit 1
+fi
+mkdir $ODIR
+
+
+# Generate the patches and cover letter
+git format-patch -M40 --subject-prefix="$PREFIX" -n -o $ODIR --thread=shallow --cover-letter $RELATIVE_TO..$COMMIT_ID > /dev/null
+
+
+# Customize the cover letter
+CL="$ODIR/0000-cover-letter.patch"
+PM="$ODIR/pull-msg"
+git request-pull $RELATIVE_TO $REMOTE_URL $COMMIT_ID >> "$PM"
+if [ $? -ne 0 ]; then
+ echo "ERROR: git request-pull reported an error"
+ exit 1
+fi
+
+# The cover letter already has a diffstat, remove it from the pull-msg
+# before inserting it.
+sed -n "0,\#$REMOTE_URL# p" "$PM" | sed -i "/BLURB HERE/ r /dev/stdin" "$CL"
+rm "$PM"
+
+# If this is an RFC, make that clear in the cover letter
+if [ $RFC -eq 1 ]; then
+(cat <<EOM
+Please review the following changes for suitability for inclusion. If you have
+any objections or suggestions for improvement, please respond to the patches. If
+you agree with the changes, please provide your Acked-by.
+
+EOM
+) | sed -i "/BLURB HERE/ r /dev/stdin" "$CL"
+fi
+
+# Insert the WEB_URL if there is one
+if [ -n "$WEB_URL" ]; then
+ echo " $WEB_URL" | sed -i "\#$REMOTE_URL# r /dev/stdin" "$CL"
+fi
+
+
+# If the user specified a message body, insert it into the cover letter and
+# remove the BLURB token.
+if [ -n "$BODY" ]; then
+ sed -i "/BLURB HERE/ r $BODY" "$CL"
+ sed -i "/BLURB HERE/ d" "$CL"
+fi
+
+# If the user specified a subject, replace the SUBJECT token with it.
+if [ -n "$SUBJECT" ]; then
+ sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+fi
+
+
+# Generate report for user
+cat <<EOM
+The following patches have been prepared:
+$(for PATCH in $(ls $ODIR/*); do echo " $PATCH"; done)
+
+Review their content, especially the summary mail:
+ $CL
+
+When you are satisfied, you can send them with:
+ send-pull-request -a -p $ODIR
+EOM
diff --git a/scripts/create-recipe b/scripts/create-recipe
new file mode 100755
index 000000000..aba9ac37d
--- /dev/null
+++ b/scripts/create-recipe
@@ -0,0 +1,1926 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2012 Wind River Systems, Inc.
+#
+# Copyright (C) 2010 Intel Corporation
+#
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, you may create a larger work that contains
+# part or all of the autospectacle output and distribute that work
+# under terms of your choice.
+# Alternatively, if you modify or redistribute autospectacle itself,
+# you may (at your option) remove this special exception.
+#
+# This special exception was modeled after the bison exception
+# (as done by the Free Software Foundation in version 2.2 of Bison)
+#
+
+
+use File::Temp qw(tempdir);
+use File::Path qw(mkpath rmtree);
+use File::Spec ();
+use File::Basename qw(basename dirname);
+
+
+my $name = "";
+my $version = "TO BE FILLED IN";
+my $description = "";
+my $summary = "";
+my $url = "";
+my $configure = "";
+my $localename = "";
+my @sources;
+my @mainfiles;
+my @patches;
+
+my $md5sum = "";
+my $sh256sum = "";
+my @inherits;
+
+my $printed_subpackages = 0;
+my $fulldir = "";
+
+my $builder = "";
+
+
+my $oscmode = 0;
+
+my @banned_pkgconfig;
+my %failed_commands;
+my %failed_libs;
+my %failed_headers;
+
+
+
+######################################################################
+#
+# License management
+#
+# We store the sha1sum of common COPYING files in an associative array
+# %licenses.
+#
+# For all matching sha1's in the tarball, we then push the result
+# in the @license array (which we'll dedupe at the time of printing).
+#
+
+my %licenses;
+my @license;
+my %lic_files;
+
+# Populate %licenses with sha1sum -> license-name mappings for the
+# contents of well-known COPYING/LICENSE files, so a tarball's license
+# can be recognized by checksum alone in guess_license_from_file().
+sub setup_licenses
+{
+	$licenses{"06877624ea5c77efe3b7e39b0f909eda6e25a4ec"} = "GPLv2";
+	$licenses{"075d599585584bb0e4b526f5c40cb6b17e0da35a"} = "GPLv2";
+	$licenses{"10782dd732f42f49918c839e8a5e2894c508b079"} = "GPLv2";
+	$licenses{"2d29c273fda30310211bbf6a24127d589be09b6c"} = "GPLv2";
+	$licenses{"4df5d4b947cf4e63e675729dd3f168ba844483c7"} = "LGPLv2.1";
+	$licenses{"503df7650052cf38efde55e85f0fe363e59b9739"} = "GPLv2";
+	$licenses{"5405311284eab5ab51113f87c9bfac435c695bb9"} = "GPLv2";
+	$licenses{"5fb362ef1680e635fe5fb212b55eef4db9ead48f"} = "LGPLv2";
+	$licenses{"68c94ffc34f8ad2d7bfae3f5a6b996409211c1b1"} = "GPLv2";
+	$licenses{"66c77efd1cf9c70d4f982ea59487b2eeb6338e26"} = "LGPLv2.1";
+	$licenses{"74a8a6531a42e124df07ab5599aad63870fa0bd4"} = "GPLv2";
+	$licenses{"8088b44375ef05202c0fca4e9e82d47591563609"} = "LGPLv2.1";
+	$licenses{"8624bcdae55baeef00cd11d5dfcfa60f68710a02"} = "GPLv3";
+	$licenses{"8e57ffebd0ed4417edc22e3f404ea3664d7fed27"} = "MIT";
+	$licenses{"99b5245b4714b9b89e7584bfc88da64e2d315b81"} = "BSD";
+	$licenses{"aba8d76d0af67d57da3c3c321caa59f3d242386b"} = "MPLv1.1";
+	$licenses{"bf50bac24e7ec325dbb09c6b6c4dcc88a7d79e8f"} = "LGPLv2";
+	$licenses{"caeb68c46fa36651acf592771d09de7937926bb3"} = "LGPLv2.1";
+	$licenses{"dfac199a7539a404407098a2541b9482279f690d"} = "GPLv2";
+	$licenses{"e60c2e780886f95df9c9ee36992b8edabec00bcc"} = "LGPLv2.1";
+	$licenses{"c931aad3017d975b7f20666cde0953234a9efde3"} = "GPLv2";
+}
+
+# Try to identify the license of a single COPYING-style file by its
+# sha1sum.  On a match, records the license name in @license and the
+# file's md5sum in %lic_files; in all cases (matched or not) the file
+# is queued for the main package's files list as %doc.
+sub guess_license_from_file {
+	my ($copying) = @_;
+
+	if (!-e $copying) {
+		return;
+	}
+
+	# sha1sum output is "<hash>  <filename>"; keep only the hash.
+	my $sha1output = `sha1sum $copying`;
+	$sha1output =~ /^([a-zA-Z0-9]*) /;
+	my $sha1 = $1;
+
+	chomp($sha1);
+
+	#
+	# if the sha1 matches a known license.. push the result
+	#
+	if (defined($licenses{$sha1})) {
+		my $lic = $licenses{$sha1};
+		push(@license, $lic);
+
+		# Also record the md5sum, which is what recipes use in
+		# LIC_FILES_CHKSUM-style fields.
+		my $md5output = `md5sum $copying`;
+		$md5output =~ /^([a-zA-Z0-9]*) /;
+		my $md5 = $1;
+		chomp($md5);
+		$lic_files{$copying} = $md5
+	}
+
+	#
+	# We also must make sure that the COPYING/etc files
+	# end up in the main package as %doc..
+	#
+	# Strip the extraction dir prefix to get an in-package path.
+	$copying =~ s/$fulldir//g;
+	$copying =~ s/^\///g;
+	$copying = "\"\%doc " . $copying ."\"";
+
+	push(@mainfiles, $copying);
+}
+
+# Write the License header to OUTFILE: a placeholder when nothing was
+# detected, otherwise the deduplicated, sorted set of detected licenses
+# on one line.
+sub print_license
+{
+	my $count = @license;
+	if ($count == 0) {
+		# NOTE(review): this branch prints "License:" while the
+		# populated branch prints "License : " - confirm which
+		# spelling the consuming format expects.
+		print OUTFILE "License: TO BE FILLED IN\n";
+		return;
+	}
+
+	# remove dupes (hash-slice trick), then emit in sorted order
+	undef %saw;
+	@saw{@license} = ();
+	@out = sort keys %saw;
+
+	print OUTFILE "License : ";
+	foreach (@out) {
+		print OUTFILE "$_ ";
+	}
+	print OUTFILE "\n";
+}
+
+# end of license section
+#
+#######################################################################
+
+######################################################################
+#
+# Package group management
+#
+# We set up an associative array of regexp patterns, where the content
+# of the array is the name of the group.
+#
+# These are "strings of regexps", which means one needs to escape
+# everything, and if you want the actual regexp to have a \,
+# it needs to be a \\ in this string.
+
+my %group_patterns;
+my @groups;
+my $group = "TO_BE/FILLED_IN";
+
+# Populate %group_patterns: a map of (string-escaped) path regexps to
+# the package group that files matching them suggest.
+sub setup_group_rules
+{
+	$group_patterns{"^\\/usr\\/lib\\/.*so"} = "System/Libraries";
+	$group_patterns{"^\\/lib\\/.*so"} = "System/Libraries";
+	$group_patterns{"^\\/bin\\/.*"} = "Applications/System";
+	$group_patterns{"^\\/sbin\\/.*"} = "Applications/System";
+	$group_patterns{"^\\/usr\\/sbin\\/.*"} = "Applications/System";
+}
+
+# For one installed filename, append every group whose pattern matches
+# to @groups (a file may contribute more than one candidate group).
+sub guess_group_from_file
+{
+	my ($filename) = @_;
+	while (($key,$value) = each %group_patterns) {
+		if ($filename =~ /$key/) {
+			push(@groups, $value);
+		}
+	}
+
+}
+
+# end of group section
+#
+######################################################################
+
+
+######################################################################
+#
+# Files and package section
+#
+# This section creates the %files section, but also decides which
+# subpackages (devel and/or doc) we need to have.
+#
+# We start out with the @allfiles array, which will contain all the
+# files installed by the %build phase of the package. The task is
+# to sort these into @mainfiles, @develfiles and @docfiles.
+# In addition, an attempt is made to compress the files list by
+# replacing full filenames with "*" patterns.
+#
+# For this we use a set of regexps from the @files_match array,
+# which are then used as index to three associative arrays:
+# %files_target : numerical index for which package the regexp
+# would place the file at hand.
+# 0 - main package
+# 1 - devel package
+# 2 - doc package
+# 99 - don't package this at all
+#
+# %files_from: regexp to match the file against for filename-wildcarding
+# %files_to : pattern to append to the ()'d part of %files_from to end up
+# with the filename-wildcard.
+
+my @allfiles;
+my @develfiles;
+my @docfiles;
+
+
+my @files_match;
+my %files_target;
+my %files_from;
+my %files_to;
+
+my $totaldocs = 0;
+
+
+# Register one file-classification rule.
+#   $match  - regexp a filename must match for the rule to apply
+#   $target - destination package index (0 main, 1 devel, 2 doc,
+#             99 do-not-package; see the section comment above)
+#   $from/$to - optional wildcarding pair: $from is matched against the
+#             filename and the ()'d capture plus $to replaces it,
+#             collapsing many files into one glob entry.  Either may be
+#             empty, in which case no wildcarding is recorded.
+sub add_files_rule
+{
+	my ($match, $target, $from, $to) =@_;
+	push(@files_match, $match);
+	$files_target{"$match"} = $target;
+
+	if (length($from) > 0) {
+		$files_from{"$match"} = $from;
+	}
+
+	if (length($to) > 0) {
+		$files_to{"$match"} = $to;
+	}
+}
+
+# Install the default classification rules used by apply_files_rules():
+# shared-library runtime files and scrollkeeper omf data go to the main
+# package; headers, .so symlinks, pkgconfig, gir/typelib, aclocal and
+# qt4 mkspecs go to devel; gtk-doc, /usr/share/doc, man pages and gnome
+# help go to docs; locale files are left unpackaged (target 99).
+sub setup_files_rules
+{
+
+#
+# Files for the Main package
+#
+
+	add_files_rule("^\\/usr\\/lib\\/[a-z0-9A-Z\\_\\-\\.]+\\.so\\.", 0,
+		"(\\/usr\\/lib\\/.*\\.so\\.).*", "\*");
+
+
+	add_files_rule("^\\/usr\\/share\\/omf\\/", 0,
+		"(\\/usr\\/share\\/omf\\/.*?\\/).*", "\*");
+
+#
+# Files for the Devel subpackage
+#
+	add_files_rule("^\\/usr\\/share\\/gir-1\\.0\\/[a-z0-9A-Z\\_\\-\\.]+\\.gir\$", 1,
+		"(\\/usr\\/share\\/gir-1\\.0\/).*", "\*\.gir");
+	add_files_rule("^\\/usr\\/lib\\/girepository-1\\.0\\/[a-z0-9A-Z\\_\\-\\.]+\\.typelib\$", 1,
+		"(\\/usr\\/lib\\/girepository-1\\.0\/).*", "\*\.typelib");
+	add_files_rule("^\\/usr\\/include\\/[a-z0-9A-Z\\_\\-\\.]+\\.h\$", 1,
+		"(\\/usr\\/include\/).*", "\*\.h");
+	add_files_rule("^\\/usr\\/include\\/[a-z0-9A-Z\\_\\-\\.]+\\/.*?\\.h\$", 1,
+		"(\\/usr\\/include\\/[a-z0-9A-Z\\_\\-\\.]+\\/.*?)[a-z0-9A-Z\\_\\-\\.]+\\.h", "\*\.h");
+	add_files_rule("^\\/usr\\/lib\\/[a-z0-9A-Z\\_\\-\\.]+\\.so\$", 1,
+		"(\\/usr\\/lib\\/).*\\.so\$", "\*.so");
+	add_files_rule("^\\/usr\\/lib\\/pkgconfig\\/[a-z0-9A-Z\\_\\-\\.\+]+\\.pc\$", 1,
+		"(\\/usr\\/lib\\/pkgconfig\\/).*\\.pc\$", "\*.pc");
+	add_files_rule("^\\/usr\\/share\\/aclocal", 1, "", "");
+	add_files_rule("^\\/usr\\/lib\\/qt4\\/mkspecs/", 1, "", "");
+
+
+
+
+#
+# Files for the documentation subpackage
+#
+	add_files_rule("^\\/usr\\/share\\/gtk\-doc\\/html\\/[a-z0-9A-Z\\_\\-\\.]+\\/.\*", 2,
+		"(\\/usr\\/share\\/gtk\-doc\\/html\\/[a-z0-9A-Z\\_\\-\\.]+\\/).\*", "\*");
+	add_files_rule("^\\/usr\\/share\\/doc\\/[a-zA-Z0-9\-]*", 2,
+		"(\\/usr\\/share\\/doc\\/[a-zA-Z0-9\-]+\\/).*", "\*");
+	add_files_rule("^\\/usr\\/share\\/man\\/man[0-9]\\/[a-zA-Z0-9\-]*", 2,
+		"(\\/usr\\/share\\/man\\/man[0-9]\\/[a-zA-Z0-9\-]+\\/).*", "\*");
+	add_files_rule("^\\/usr\\/share\\/gnome\\/help\\/", 2,
+		"(\\/usr\\/share\\/gnome\\/help\\/.*?\\/.*?\\/).*", "\*");
+
+
+#
+# Files to just not package at all (picked up by other things)
+#
+	add_files_rule("^\\/usr\\/share\\/locale", 99, "", "");
+	# compiled python things will get auto cleaned by rpm
+#	add_files_rule("\.pyo\$", 99, "", "");
+#	add_files_rule("\.pyc\$", 99, "", "");
+
+}
+
+# Drain @allfiles, sorting each installed file into @mainfiles,
+# @develfiles or @docfiles according to the registered rules (see
+# setup_files_rules), wildcarding filenames where possible.  As a side
+# effect, feeds every filename to the group guesser and captures the
+# gettext domain name from any .mo file it discards.  Finally, if five
+# or fewer doc files were found, folds them back into the main package.
+sub apply_files_rules
+{
+	my $filenumber = @allfiles;
+
+	if ($filenumber == 0) {
+		return;
+	}
+
+	while (@allfiles > 0) {
+		my $filename = $allfiles[0];
+		my $destname = $filename;
+		my $handled = 0;
+
+#
+# while we're here, try to guess what group our package is
+#
+		guess_group_from_file($filename);
+
+		foreach (@files_match) {
+			my $match = $_;
+
+			if ($filename =~ /$match/) {
+#
+# First try to see if we can turn the full filename into a
+# wildcard based filename
+#
+				if (defined($files_from{$match}) && defined($files_to{$match})) {
+					$from = $files_from{$match};
+					$to = $files_to{$match};
+					$destname =~ s/$from/$1$to/;
+#					print "changing $filename to $destname\n";
+				}
+
+# devel package
+				if ($files_target{$match} == 1) {
+					$handled = 1;
+					push(@develfiles, $destname);
+				}
+# doc rules.. also prepend %doc
+				if ($files_target{$match} == 2) {
+					$handled = 1;
+					$destname = "\"%doc " . $destname . "\"";
+					push(@docfiles, $destname);
+					$totaldocs = $totaldocs + 1;
+				}
+# don't package, but remember the gettext locale domain name if seen
+				if ($files_target{$match} == 99) {
+					$handled = 1;
+					if ($filename =~ /\/usr\/share\/locale\/.*?\/LC_MESSAGES\/(.*)\.mo/) {
+						$localename = $1;
+					}
+				}
+			}
+		}
+
+
+#
+# if the destination name contains our package version,
+# use %version instead for future maintenance
+#
+		$destname =~ s/$version/\%\{version\}/g;
+		if ($handled == 0) {
+			push(@mainfiles, $destname);
+		}
+		shift(@allfiles);
+	}
+
+#
+# Now.. if we have less than 5 documentation files, just stick them in the main package
+#
+
+	$filenumber = @docfiles;
+
+	if ($filenumber <= 5) {
+		while (@docfiles > 0) {
+			my $filename = $docfiles[0];
+
+			push(@mainfiles, $filename);
+			shift(@docfiles);
+		}
+	}
+
+}
+
+# Write the main package's Files: section to OUTFILE, deduplicated and
+# sorted.  Emits nothing at all when there are no main files.
+sub print_files
+{
+	my $count = @mainfiles;
+	if ($count == 0) {
+		return;
+	}
+
+	# remove dupes (hash-slice trick)
+	undef %saw;
+	@saw{@mainfiles} = ();
+	@out = sort keys %saw;
+
+	print OUTFILE "Files:\n";
+	foreach (@out) {
+		print OUTFILE " - $_\n";
+	}
+}
+
+# Emit the "-devel" subpackage stanza (SubPackages: header plus the
+# de-duplicated @develfiles list) to OUTFILE.  Sets the global
+# $printed_subpackages so print_doc knows the header was already written.
+sub print_devel
+{
+ my $count = @develfiles;
+ if ($count == 0) {
+ return;
+ }
+ print OUTFILE "SubPackages:\n";
+ $printed_subpackages = 1;
+ print OUTFILE " - Name: devel\n";
+ print OUTFILE " Summary: Development components for the $name package\n";
+ print OUTFILE " Group: Development/Libraries\n";
+ print OUTFILE " Description:\n";
+ print OUTFILE " - Development files for the $name package\n";
+
+ # remove dupes
+ undef %saw;
+ @saw{@develfiles} = ();
+ @out = sort keys %saw;
+
+ print OUTFILE " Files:\n";
+ foreach (@out) {
+ print OUTFILE " - $_\n";
+ }
+}
+
+# Emit the "docs" subpackage stanza with the de-duplicated @docfiles list
+# to OUTFILE.  Writes the "SubPackages:" header only if print_devel did
+# not already do so (tracked through the global $printed_subpackages).
+sub print_doc
+{
+ my $count = @docfiles;
+ if ($count == 0) {
+ return;
+ }
+ if ($printed_subpackages == 0) {
+ print OUTFILE "SubPackages:\n";
+ $printed_subpackages = 1;
+ }
+ print OUTFILE " - Name: docs\n";
+ print OUTFILE " Summary: Documentation components for the $name package\n";
+ print OUTFILE " Group: Documentation\n";
+
+ # remove dupes
+ undef %saw;
+ @saw{@docfiles} = ();
+ @out = sort keys %saw;
+
+ print OUTFILE " Files:\n";
+ foreach (@out) {
+ print OUTFILE " - $_\n";
+ }
+}
+
+
+# end of %files section
+#
+######################################################################
+
+
+######################################################################
+#
+# What we can learn from configure.ac/configure
+#
+# - pkgconfig requirements
+# - regular build requirements
+# - package name / version
+
+
+# Populate @banned_pkgconfig with regexes for pkgconfig modules that must
+# never be emitted as build requirements; push_pkgconfig_buildreq drops
+# any candidate matching one of these patterns.
+sub setup_pkgconfig_ban
+{
+ push(@banned_pkgconfig, "^dnl\$");
+ push(@banned_pkgconfig, "^hal\$"); # we don't have nor want HAL
+ push(@banned_pkgconfig, "tslib-0.0"); # we don't want tslib-0.0 (legacy touchscreen interface)
+ push(@banned_pkgconfig, "intel-gen4asm");
+ push(@banned_pkgconfig, "^xp\$"); # xprint - deprecated and not in meego
+ push(@banned_pkgconfig, "^directfb\$"); # we use X, not directfb
+ push(@banned_pkgconfig, "^gtkmm-2.4\$"); # gtkmm not wanted either
+ push(@banned_pkgconfig, "^evil\$");
+ push(@banned_pkgconfig, "^directfb");
+ push(@banned_pkgconfig, "^sdl ");
+
+
+}
+
+# Map strings that appear in failed configure/build output to the package
+# (or pkgconfig module) providing them; used by the osc-build retry loop
+# to add missing build requirements automatically.
+#   %failed_commands : matched against "checking for <key>... no" and
+#                      "<key>: command not found" lines
+#   %failed_libs     : matched against "checking for <fn> in <key>... no"
+#   %failed_headers  : matched against "fatal error: <key>: No such file"
+sub setup_failed_commands
+{
+ $failed_commands{"doxygen"} = "doxygen";
+ $failed_commands{"scrollkeeper-config"} = "rarian-compat";
+ $failed_commands{"dot"} = "graphviz";
+ $failed_commands{"flex"} = "flex";
+ $failed_commands{"lex"} = "flex";
+ $failed_commands{"freetype-config"} = "freetype-devel";
+ $failed_commands{"makeinfo"} = "texinfo";
+ $failed_commands{"desktop-file-install"} = "desktop-file-utils";
+ $failed_commands{"deflateBound in -lz"} = "zlib-devel";
+ $failed_commands{"gconftool-2"} = "GConf-dbus";
+ $failed_commands{"jpeglib.h"} = "libjpeg-devel";
+ $failed_commands{"expat.h"} = "expat-devel";
+ $failed_commands{"bison"} = "bison";
+ $failed_commands{"msgfmt"} = "gettext";
+ $failed_commands{"curl-config"} = "libcurl-devel";
+ $failed_commands{"X"} = "pkgconfig(x11)";
+
+ $failed_commands{"gawk"} = "gawk";
+ # key was misspelled "xbkcomp"; the lookup compares against the literal
+ # command name from the build log, so it could never match xkbcomp
+ $failed_commands{"xkbcomp"} = "xkbcomp";
+ $failed_commands{"Vorbis"} = "libvorbis-devel";
+ # checking Expat 1.95.x... no
+ $failed_commands{"Expat 1.95.x"} = "expat-devel";
+ $failed_commands{"xml2-config path"} = "libxml2-devel";
+
+ $failed_libs{"-lz"} = "zlib-devel";
+ $failed_libs{"-lncursesw"} = "ncurses-devel";
+ $failed_libs{"-ltiff"} = "libtiff-devel";
+ $failed_libs{"-lasound"} = "alsa-lib-devel";
+ $failed_libs{"Curses"} = "ncurses-devel";
+
+ $failed_headers{"X11/extensions/randr.h"} = "xrandr";
+ $failed_headers{"X11/Xlib.h"} = "x11";
+ $failed_headers{"X11/extensions/XShm.h"} = "xext";
+ $failed_headers{"X11/extensions/shape.h"} = "xext";
+ $failed_headers{"ncurses.h"} = "ncursesw";
+ $failed_headers{"curses.h"} = "ncursesw";
+ $failed_headers{"pci/pci.h"} = "libpci";
+ $failed_headers{"xf86.h"} = "xorg-server";
+ $failed_headers{"sqlite.h"} = "sqlite3";
+
+ $failed_headers{"X11/extensions/XIproto.h"} = "xi";
+ $failed_headers{"QElapsedTimer"} = "";
+}
+
+
+
+my @package_configs;
+my @buildreqs;
+my $uses_configure = 0;
+
+
+# Sanitize one pkgconfig requirement string and append it to
+# @package_configs.  Strips quoting/bracket debris left over from m4,
+# re-inserts spaces around >=/<= operators, drops entries that contain
+# configure variables or AC_SUBST, removes version constraints that are
+# not plain numbers, and silently discards anything matching a pattern
+# in @banned_pkgconfig.
+sub push_pkgconfig_buildreq
+{
+ my ($pr) = @_;
+
+ $pr =~ s/\s+//g;
+
+ # remove collateral ] ) etc damage in the string
+ $pr =~ s/\"//g;
+ $pr =~ s/\)//g;
+ $pr =~ s/\]//g;
+ $pr =~ s/\[//g;
+
+
+ # first, undo the space packing
+
+ $pr =~ s/\>\=/ \>\= /g;
+ $pr =~ s/\<\=/ \<\= /g;
+
+ # NOTE(review): drops a literal "<1.1.1" constraint; looks like a
+ # one-off workaround for a specific package -- confirm before removing
+ $pr =~ s/\<1.1.1/ /g;
+
+ # don't show configure variables, we can't deal with them
+ if ($pr =~ /^\$/) {
+ return;
+ }
+ if ($pr =~ /AC_SUBST/) {
+ return;
+ }
+
+
+
+
+ # process banned pkgconfig options for things that we don't
+ # have or don't want.
+
+
+ # remore versions that are macros or strings, not numbers
+ $pr =~ s/\s\>\= \$.*//g;
+
+ $pr =~ s/\s\>\= [a-zA-Z]+.*//g;
+
+ # don't show configure variables, we can't deal with them
+ if ($pr =~ /\$/) {
+ return;
+ }
+
+ foreach (@banned_pkgconfig) {
+ my $ban = $_;
+ if ($pr =~ /$ban/) {
+ return;
+ }
+ }
+
+ push(@package_configs, $pr);
+}
+
+#
+# detect cases where we require both a generic pkgconfig, and a version specific
+# case
+#
+#
+# detect cases where we require both a generic pkgconfig, and a version specific
+# case
+#
+# De-duplicate @package_configs, then blank out any unversioned entry
+# that is shadowed by a versioned one (e.g. drop "glib-2.0" when
+# "glib-2.0 >= 2.20" is present).  Leaves the result both in
+# @package_configs and in the global @out, which print_pkgconfig reads.
+sub uniquify_pkgconfig
+{
+ # first remove real dupes
+ undef %saw;
+ @saw{@package_configs} = ();
+ @out = sort keys %saw;
+
+ my $count = 0;
+
+ while ($count < @out) {
+
+ my $entry = $out[$count];
+
+ foreach(@out) {
+ my $compare = $_;
+ if ($entry eq $compare) {
+ next;
+ }
+
+ # strip the version constraint; if the remainder equals $entry,
+ # $entry is the redundant unversioned duplicate -- blank it
+ $compare =~ s/ \>\=.*//g;
+ if ($entry eq $compare) {
+ $out[$count] = "";
+ }
+ }
+ $count = $count + 1;
+ }
+ @package_configs = @out;
+}
+
+
+# Parse a configure.ac/configure.in file for pkgconfig requirements
+# (PKG_CHECK_MODULES / PKG_CHECK_EXISTS / XDT_CHECK_PACKAGE...), plain
+# build requirements implied by well-known macros, and the package
+# version from AC_INIT / AM_INIT_AUTOMAKE.  Reads the file character by
+# character, joining physical lines until parentheses balance so that
+# multi-line macro invocations are seen as one logical line.  Updates
+# the globals $uses_configure, $version, @buildreqs and (indirectly via
+# push_pkgconfig_buildreq) @package_configs.
+sub process_configure_ac
+{
+ my ($filename) = @_;
+ my $line = "";
+ my $depth = 0;
+ my $keepgoing = 1;
+ my $buffer = "";
+
+ if (!-e $filename) {
+ return;
+ }
+
+ $uses_configure = 1;
+
+
+
+ open(CONFIGURE, "$filename") || die "Couldn't open $filename\n";
+ seek(CONFIGURE, 0,0) or die "seek : $!";
+ while ($keepgoing && !eof(CONFIGURE)) {
+ $buffer = getc(CONFIGURE);
+
+ # track macro parenthesis nesting so newlines inside a macro call
+ # do not terminate the logical line
+ if ($buffer eq "(") {
+ $depth = $depth + 1;
+ }
+ if ($buffer eq ")" && $depth > 0) {
+ $depth = $depth - 1;
+ }
+
+ if (!($buffer eq "\n")) {
+ $line = $line . $buffer;
+ }
+
+ if (!($buffer eq "\n") || $depth > 0) {
+ redo unless eof(CONFIGURE);
+ }
+
+ if ($line =~ /PKG_CHECK_MODULES\((.*)\)/) {
+ # second macro argument is the module list, e.g.
+ # PKG_CHECK_MODULES(GLIB, [glib-2.0 >= 2.20 gobject-2.0])
+ my $match = $1;
+ $match =~ s/\s+/ /g;
+ $match =~ s/, /,/g;
+ my @pkgs = split(/,/, $match);
+ my $pkg;
+ if (defined($pkgs[1])) {
+ $pkg = $pkgs[1];
+ } else {
+ next;
+ }
+ if ($pkg =~ /\[(.*)\]/) {
+ $pkg = $1;
+ }
+
+ $pkg =~ s/\s+/ /g;
+ # deal with versioned pkgconfig's by removing the spaces around >= 's
+ $pkg =~ s/\>\=\s/\>\=/g;
+ $pkg =~ s/\s\>\=/\>\=/g;
+ $pkg =~ s/\=\s/\=/g;
+ $pkg =~ s/\s\=/\=/g;
+ $pkg =~ s/\<\=\s/\<\=/g;
+ $pkg =~ s/\<\s/\</g;
+ $pkg =~ s/\s\<\=/\<\=/g;
+ $pkg =~ s/\s\</\</g;
+
+ @words = split(/ /, $pkg);
+ foreach(@words) {
+ push_pkgconfig_buildreq($_);
+ }
+ }
+
+ if ($line =~ /PKG_CHECK_EXISTS\((.*)\)/) {
+ # first macro argument holds the module list here
+ my $match = $1;
+ $match =~ s/\s+/ /g;
+ $match =~ s/, /,/g;
+ my @pkgs = split(/,/, $match);
+ my $pkg = $pkgs[0];
+ if ($pkg =~ /\[(.*)\]/) {
+ $pkg = $1;
+ }
+
+ $pkg =~ s/\s+/ /g;
+ # deal with versioned pkgconfig's by removing the spaces around >= 's
+ $pkg =~ s/\>\=\s/\>\=/g;
+ $pkg =~ s/\s\>\=/\>\=/g;
+ $pkg =~ s/\<\=\s/\<\=/g;
+ $pkg =~ s/\<\s/\</g;
+ $pkg =~ s/\s\<\=/\<\=/g;
+ $pkg =~ s/\s\</\</g;
+ $pkg =~ s/\=\s/\=/g;
+ $pkg =~ s/\s\=/\=/g;
+
+ @words = split(/ /, $pkg);
+ foreach(@words) {
+ push_pkgconfig_buildreq($_);
+ }
+ }
+
+ # Xfce development tool macros
+ if ($line =~ /XDT_CHECK_PACKAGE\(.*?,.*?\[(.*?)\].*\)/) {
+ my $pkg = $1;
+
+ $pkg =~ s/\s+/ /g;
+ # deal with versioned pkgconfig's by removing the spaces around >= 's
+ $pkg =~ s/\>\=\s/\>\=/g;
+ $pkg =~ s/\s\>\=/\>\=/g;
+ $pkg =~ s/\=\s/\=/g;
+ $pkg =~ s/\s\=/\=/g;
+
+ @words = split(/ /, $pkg);
+ foreach(@words) {
+ push_pkgconfig_buildreq($_);
+ }
+ }
+
+ if ($line =~ /XDT_CHECK_OPTIONAL_PACKAGE\(.*?,.*?\[(.*?)\].*\)/) {
+ my $pkg = $1;
+
+ $pkg =~ s/\s+/ /g;
+ # deal with versioned pkgconfig's by removing the spaces around >= 's
+ $pkg =~ s/\>\=\s/\>\=/g;
+ $pkg =~ s/\s\>\=/\>\=/g;
+ $pkg =~ s/\=\s/\=/g;
+ $pkg =~ s/\s\=/\=/g;
+
+ @words = split(/ /, $pkg);
+ foreach(@words) {
+ push_pkgconfig_buildreq($_);
+ }
+ }
+
+ # well-known macros that imply specific build requirements
+ if ($line =~ /AC_CHECK_LIB\(\[expat\]/) {
+ push(@buildreqs, "expat-devel");
+ }
+ if ($line =~ /AC_CHECK_FUNC\(\[tgetent\]/) {
+ push(@buildreqs, "ncurses-devel");
+ }
+ if ($line =~ /_PROG_INTLTOOL/) {
+ push(@buildreqs, "intltool");
+ }
+ if ($line =~ /GETTEXT_PACKAGE/) {
+ push(@buildreqs, "gettext");
+ }
+ if ($line =~ /GTK_DOC_CHECK/) {
+ push_pkgconfig_buildreq("gtk-doc");
+ }
+ if ($line =~ /GNOME_DOC_INIT/) {
+ push(@buildreqs, "gnome-doc-utils");
+ }
+ if ($line =~ /AM_GLIB_GNU_GETTEXT/) {
+ push(@buildreqs, "gettext");
+ }
+
+ # AC_INIT(name, version, ...): only the version is extracted; the
+ # name assignments were deliberately commented out (the name comes
+ # from the tarball URL instead)
+ if ($line =~ /AC_INIT\((.*)\)/) {
+ my $match = $1;
+ $match =~ s/\s+/ /g;
+ @acinit = split(/,/, $match);
+# $name = $acinit[0];
+
+ if ($name =~ /\[(.*)\]/) {
+# $name = $1;
+ }
+
+ if (defined($acinit[3])) {
+# $name = $acinit[3];
+ if ($name =~ /\[(.*)\]/) {
+# $name = $1;
+ }
+ }
+ if (defined($acinit[1])) {
+ my $ver = $acinit[1];
+ $ver =~ s/\[//g;
+ $ver =~ s/\]//g;
+ # ignore versions that are shell/m4 variables
+ if ($ver =~ /\$/){} else {
+ $version = $ver;
+ $version =~ s/\s+//g;
+ }
+ }
+ }
+ # AM_INIT_AUTOMAKE(name, version): same version extraction as AC_INIT
+ if ($line =~ /AM_INIT_AUTOMAKE\((.*)\)/) {
+ my $match = $1;
+ $match =~ s/\s+/ /g;
+ @acinit = split(/,/, $match);
+# $name = $acinit[0];
+
+ if ($name =~ /\[(.*)\]/) {
+# $name = $1;
+ }
+
+ if (defined($acinit[3])) {
+# $name = $acinit[3];
+ if ($name =~ /\[(.*)\]/) {
+# $name = $1;
+ }
+ }
+ if (defined($acinit[1])) {
+ my $ver = $acinit[1];
+ $ver =~ s/\[//g;
+ $ver =~ s/\]//g;
+ if ($ver =~ /\$/){} else {
+ $version = $ver;
+ $version =~ s/\s+//g;
+ }
+ }
+ }
+
+ $line = "";
+ }
+ close(CONFIGURE);
+}
+
+# Parse a qmake .pro file for "PKGCONFIG += ..." style lines and feed
+# each listed module to push_pkgconfig_buildreq.  Like
+# process_configure_ac this reads character by character; here a logical
+# line continues while parens are open OR the previous character was a
+# backslash (qmake line continuation).
+sub process_qmake_pro
+{
+ my ($filename) = @_;
+ my $line = "";
+ my $depth = 0;
+ my $keepgoing = 1;
+ my $buffer = "";
+ my $prev_char = "";
+
+ if (!-e $filename) {
+ return;
+ }
+
+
+ open(CONFIGURE, "$filename") || die "Couldn't open $filename\n";
+ seek(CONFIGURE, 0,0) or die "seek : $!";
+ while ($keepgoing && !eof(CONFIGURE)) {
+ $buffer = getc(CONFIGURE);
+
+ if ($buffer eq "(") {
+ $depth = $depth + 1;
+ }
+ if ($buffer eq ")" && $depth > 0) {
+ $depth = $depth - 1;
+ }
+
+ if (!($buffer eq "\n")) {
+ $line = $line . $buffer;
+ }
+
+ # keep accumulating across backslash-continued lines
+ if (!($buffer eq "\n") || ($prev_char eq "\\") ) {
+ $prev_char = $buffer;
+ redo unless eof(CONFIGURE);
+ }
+ $prev_char = " ";
+
+ if ($line =~ /PKGCONFIG.*?\=(.*)/) {
+ my $l = $1;
+ my @pkgs;
+
+ $l =~ s/\\//g;
+ $l =~ s/\s/ /g;
+ @pkgs = split(/ /, $l);
+ foreach (@pkgs) {
+ if (length($_)>1) {
+ push_pkgconfig_buildreq($_);
+ }
+ }
+ }
+
+ $line = "";
+ }
+ close(CONFIGURE);
+}
+
+#
+# We also check configure if it exists, it's nice for some things
+# because various configure.ac macros have been expanded for us already.
+#
+#
+# We also check configure if it exists, it's nice for some things
+# because various configure.ac macros have been expanded for us already.
+#
+# Extracts PACKAGE_NAME / PACKAGE_TARNAME / PACKAGE_VERSION /
+# PACKAGE_URL shell assignments from a generated configure script into
+# the globals $name, $version and $url; also marks $uses_configure.
+sub process_configure
+{
+ my ($filename) = @_;
+ my $line = "";
+ my $depth = 0;
+ my $keepgoing = 1;
+
+ if (!-e $filename) {
+ return;
+ }
+
+ $uses_configure = 1;
+
+ open(CONFIGURE, "$filename") || die "Couldn't open $filename\n";
+ seek(CONFIGURE, 0,0) or die "seek : $!";
+ while ($keepgoing && !eof(CONFIGURE)) {
+ $buffer = getc(CONFIGURE);
+
+ if ($buffer eq "(") {
+ $depth = $depth + 1;
+ }
+ if ($buffer eq ")" && $depth > 0) {
+ $depth = $depth - 1;
+ }
+
+ if (!($buffer eq "\n")) {
+ $line = $line . $buffer;
+ }
+
+ if (!($buffer eq "\n") || $depth > 0) {
+ redo unless eof(CONFIGURE);
+ }
+
+
+
+ # TARNAME is read after NAME, so it wins when both are present
+ if ($line =~ /^PACKAGE_NAME=\'(.*?)\'/) {
+ $name = $1;
+ }
+ if ($line =~ /^PACKAGE_TARNAME=\'(.*?)\'/) {
+ $name = $1;
+ }
+ if ($line =~ /^PACKAGE_VERSION=\'(.*?)\'/) {
+ $version = $1;
+ $version =~ s/\s+//g;
+ }
+ # ignore empty/placeholder URLs (quotes alone match with length <= 2)
+ if ($line =~ /^PACKAGE_URL=\'(.*?)\'/) {
+ if (length($1) > 2) {
+ $url = $1;
+ }
+ }
+
+
+ $line = "";
+ }
+ close(CONFIGURE);
+}
+
+# Emit the "PkgConfigBR:" section to OUTFILE.  Note the hidden coupling:
+# uniquify_pkgconfig() leaves its result in the global @out, which is
+# what the loop below iterates (blanked-out duplicates are skipped by
+# the length check).
+sub print_pkgconfig
+{
+ my $count = @package_configs;
+ if ($count == 0) {
+ return;
+ }
+
+ uniquify_pkgconfig();
+
+ print OUTFILE "PkgConfigBR:\n";
+ foreach (@out) {
+ $line = $_;
+ $line =~ s/^\s+//g;
+ if (length($line) > 1) {
+ print OUTFILE " - $line\n";
+ }
+ }
+}
+
+# Emit the "PkgBR:" (plain, non-pkgconfig build requirements) section to
+# OUTFILE, de-duplicating @buildreqs first.
+sub print_buildreq
+{
+ my $count = @buildreqs;
+ if ($count == 0) {
+ return;
+ }
+
+ # remove dupes
+ undef %saw;
+ @saw{@buildreqs} = ();
+ @out = sort keys %saw;
+
+ print OUTFILE "PkgBR:\n";
+ foreach (@out) {
+ print OUTFILE " - $_\n";
+ }
+}
+
+
+# end of configure section
+#
+######################################################################
+
+
+######################################################################
+#
+# Guessing the Description and Summary for a package
+#
+# We'll look at various sources of information for this:
+# - spec files in the package
+# - debain files in the package
+# - DOAP files in the package
+# - pkgconfig files in the package
+# - the README file in the package
+# - freshmeat.net online
+#
+
+# Harvest $description, $summary and $url from a .spec file shipped
+# inside the tarball.  The description is the text between the
+# %description tag and the next % section; existing non-empty $summary
+# and $url values are preserved.
+sub guess_description_from_spec {
+ my ($specfile) = @_;
+
+ # state: 0 = before %description, 1 = inside it, 2 = done
+ my $state = 0;
+ my $cummul = "";
+
+ open(SPEC, $specfile);
+ while (<SPEC>) {
+ my $line = $_;
+ if ($state == 1 && $line =~ /^\%/) {
+ $state = 2;
+ }
+ if ($state == 1) {
+ $cummul = $cummul . $line;
+ }
+ if ($state==0 && $line =~ /\%description/) {
+ $state = 1;
+ }
+
+ if ($line =~ /Summary:\s*(.*)/ && length($summary) < 2) {
+ $summary = $1;
+ }
+ if ($line =~ /URL:\s*(.*)/ && length($url) < 2) {
+ $url = $1;
+ }
+ }
+ close(SPEC);
+ if (length($cummul) > 4) {
+ $description = $cummul;
+ }
+}
+
+#
+# DOAP is a project to create an XML/RDF vocabulary to describe software projects, and in particular open source.
+# so if someone ships a .doap file... we can learn from it.
+#
+#
+# DOAP is a project to create an XML/RDF vocabulary to describe software projects, and in particular open source.
+# so if someone ships a .doap file... we can learn from it.
+#
+# Line-oriented scrape (not a real XML parse) of <shortdesc> into
+# $summary and <homepage rdf:resource="..."> into $url.
+sub guess_description_from_doap {
+ my ($doapfile) = @_;
+
+ open(DOAP, $doapfile);
+ while (<DOAP>) {
+ my $line = $_;
+ # <shortdesc xml:lang="en">Virtual filesystem implementation for gio</shortdesc>
+ if ($line =~ /\<shortdesc .*?\>(.*)\<\/shortdesc\>/) {
+ $summary = $1;
+ }
+ if ($line =~ /\<homepage .*?resource=\"(.*)\"\s*\/>/) {
+ $url = $1;
+ }
+ }
+ close(DOAP);
+}
+
+#
+# Debian control files have some interesting fields we can glean information
+# from as well.
+#
+#
+# Debian control files have some interesting fields we can glean information
+# from as well.
+#
+# Takes the unpacked source directory; reads <dir>/debian/control and
+# copies the Description field (first line plus the indented continuation
+# block, terminated by a blank line) into the global $description.
+sub guess_description_from_debian_control {
+ my ($file) = @_;
+
+ # state: 0 = before Description, 1 = collecting it, 2 = done
+ my $state = 0;
+ my $cummul = "";
+
+ $file = $file . "/debian/control";
+
+ open(FILE, $file) || return;
+ while (<FILE>) {
+ my $line = $_;
+ if ($state == 1 && length($line) < 2) {
+ $state = 2;
+ }
+ if ($state == 1) {
+ $cummul = $cummul . $line;
+ }
+ # was /\Description: (.*)/ -- "\D" matches any single non-digit, so
+ # the pattern fired on any line containing "escription: "; anchor on
+ # the actual control-file field instead
+ if ($state==0 && $line =~ /^Description: (.*)/) {
+ $state = 1;
+ $cummul = $1;
+ }
+
+ }
+ close(FILE);
+ if (length($cummul) > 4) {
+ $description = $cummul;
+ }
+}
+
+#
+# the pkgconfig files have often a one line description
+# of the software... good for Summary
+#
+#
+# the pkgconfig files have often a one line description
+# of the software... good for Summary
+#
+# Copies the "Description:" line of a .pc file into $summary, but only
+# when $summary is still empty.
+sub guess_description_from_pkgconfig {
+ my ($file) = @_;
+
+ open(FILE, $file);
+ while (<FILE>) {
+ my $line = $_;
+
+ if ($line =~ /Description:\s*(.*)/ && length($summary) < 2) {
+ $summary = $1;
+ }
+ }
+ close(FILE);
+}
+
+#
+# Freshmeat can provide us with a good one paragraph description
+# of the software..
+#
+#
+# Freshmeat can provide us with a good one paragraph description
+# of the software..
+#
+# Scrapes http://freshmeat.net/projects/<name> via curl and extracts the
+# first paragraph of the project-detail div into $description.  Network
+# failure is harmless: curl produces no output and $description is left
+# untouched by the length guard at the end.
+sub guess_description_from_freshmeat {
+ my ($tarname) = @_;
+ my $cummul = "";
+ my $state = 0;
+ open(HTML, "curl -s http://freshmeat.net/projects/$tarname |");
+ while (<HTML>) {
+ my $line = $_;
+
+ if ($state == 1) {
+ $cummul = $cummul . $line;
+ }
+ if ($state == 0 && $line =~ /\<div class\=\"project-detail\"\>/) {
+ $state = 1;
+ }
+ if ($state == 1 && $line =~/\<\/p\>/) {
+ $state = 2;
+ }
+ }
+ close(HTML);
+ $cummul =~ s/\<p\>//g;
+ $cummul =~ s/\r//g;
+ $cummul =~ s/\<\/p\>//g;
+ $cummul =~ s/^\s*//g;
+ if (length($cummul)>10) {
+ $description = $cummul;
+ }
+}
+#
+# If all else fails, just take the first paragraph of the
+# readme file
+#
+#
+# If all else fails, just take the first paragraph of the
+# readme file
+#
+# Collects the first non-trivial paragraph (>80 chars before a blank
+# line ends it) into $description -- only if no description was found
+# elsewhere.  Also opportunistically picks up a project URL of the form
+# http://...<name>...org, skipping bug-tracker links.
+sub guess_description_from_readme {
+ my ($file) = @_;
+
+ my $state = 0;
+ my $cummul = "";
+
+ open(FILE, $file);
+ while (<FILE>) {
+ my $line = $_;
+ if ($state == 1 && $line =~ /^\n/ && length($cummul) > 80) {
+ $state = 2;
+ }
+ if ($state == 0 && length($line)>1) {
+ $state = 1;
+ }
+ if ($state == 1) {
+ $cummul = $cummul . $line;
+ }
+ if ($line =~ /(http\:\/\/.*$name.*\.org)/) {
+ my $u = $1;
+ if ($u =~ /bug/ || length($url) > 1) {
+ } else {
+ $url = $u;
+ }
+ }
+ }
+ close(FILE);
+ if (length($cummul) > 4 && length($description)<3) {
+ $description = $cummul;
+ }
+}
+
+#
+# Glue all the guesses together
+#
+#
+# Glue all the guesses together
+#
+# Runs the individual guessers in priority order: README first (weakest),
+# then freshmeat, shipped .spec files, debian/control, .pc files and
+# finally .doap -- later guessers overwrite earlier results when they
+# find something.  Falls back to deriving $summary from the first
+# sentence of $description.
+sub guess_description {
+ my ($directory) = @_;
+
+
+ @files = <$directory/README*>;
+ foreach (@files) {
+ guess_description_from_readme($_);
+ }
+
+ if (length($name)>2) {
+ guess_description_from_freshmeat($name);
+ }
+
+ @files = <$directory/*.spec*>;
+ foreach (@files) {
+ guess_description_from_spec($_);
+ }
+
+ guess_description_from_debian_control($directory);
+
+ $name =~ s/ //g;
+ @files = <$directory/$name.pc*>;
+ foreach (@files) {
+ guess_description_from_pkgconfig($_);
+ }
+ @files = <$directory/*.pc.*>;
+ foreach (@files) {
+ guess_description_from_pkgconfig($_);
+ }
+ @files = <$directory/*.pc>;
+ foreach (@files) {
+ guess_description_from_pkgconfig($_);
+ }
+ @files = <$directory/*.doap>;
+ foreach (@files) {
+ guess_description_from_doap($_);
+ }
+
+ # last resort: first sentence of the description becomes the summary
+ if (length($summary) < 2) {
+ $summary = $description;
+ $summary =~ s/\n/ /g;
+ $summary =~ s/\s+/ /g;
+ if ($summary =~ /(.*?)\./) {
+ $summary = $1;
+ }
+ }
+
+}
+
+# end of Description / Summary section
+#
+######################################################################
+
+
+
+#
+# Build the package, and wait for rpm to complain about unpackaged
+# files.... which we then use as basis for our %files section
+#
+#
+# Build the package, and wait for rpm to complain about unpackaged
+# files.... which we then use as basis for our %files section
+#
+# Runs rpmbuild on the generated spec and scrapes the "Installed (but
+# unpackaged) file(s) found:" section from its output into @allfiles.
+# Exits the whole program when nothing was collected (i.e. the build
+# failed before reaching the file check).
+sub guess_files_from_rpmbuild {
+ my $infiles = 0;
+ open(OUTPUTF, "rpmbuild --nodeps --define \"\%_sourcedir $orgdir \" -ba $name.spec 2>&1 |");
+ while (<OUTPUTF>) {
+ my $line2 = $_;
+
+ # either marker means we've run past the unpackaged-files list
+ if ($infiles == 1 && $line2 =~ /RPM build errors/) {
+ $infiles = 2;
+ }
+ if ($infiles == 1 && $line2 =~ /^Building/) {
+ $infiles = 2;
+ }
+
+ if ($infiles == 1) {
+ $line2 =~ s/\s*//g;
+ push(@allfiles, $line2);
+ }
+ if ($line2 =~ / Installed \(but unpackaged\) file\(s\) found\:/) {
+ $infiles = 1;
+ }
+ }
+ close(OUTPUTF);
+ if (@allfiles == 0) {
+ print "Build failed ... stopping here.\n";
+ exit(0);
+ }
+
+}
+
+# OBS variant of guess_files_from_rpmbuild: commits the work tree, runs
+# "osc build", scrapes the unpackaged-files list into @allfiles, and
+# additionally pattern-matches the build log for missing-dependency
+# messages (pkg-config, headers, commands, libraries...).  Each match
+# adds a requirement and bumps $restart; when the build yielded no files
+# (or a retry was forced) the yaml is rewritten and the build restarted,
+# up to 10 times.
+sub guess_files_from_oscbuild {
+ my $infiles = 0;
+ my $restart = 0;
+ my $mustrestart = 0;
+ my $rcount = 0;
+ my $done_python = 0;
+
+ system("osc addremove &> /dev/null");
+ system("osc ci -m \"Initial import by autospectacle\" &> /dev/null");
+
+retry:
+ if ($restart > 0) {
+ write_yaml();
+ print "Restarting the build\n";
+ }
+ $restart = 0;
+ $infiles = 0;
+ $mustrestart = 0;
+ open(OUTPUTF, "osc build --no-verify $name.spec 2>&1 |");
+ while (<OUTPUTF>) {
+ my $line2 = $_;
+
+# print "line is $line2\n";
+ if ($infiles == 1 && $line2 =~ /RPM build errors/) {
+ $infiles = 2;
+ }
+ if ($infiles == 1 && $line2 =~ /^Building/) {
+ $infiles = 2;
+ }
+ if ($infiles == 1) {
+ $line2 =~ s/\s*//g;
+ push(@allfiles, $line2);
+ }
+ if ($line2 =~ /No package \'(.*)\' found/) {
+ push_pkgconfig_buildreq("$1");
+ $restart = $restart + 1;
+ print " Adding pkgconfig($1) requirement\n";
+ }
+ if ($line2 =~ /Package requirements \((.*?)\) were not met/) {
+ $pkg = $1;
+ # deal with versioned pkgconfig's by removing the spaces around >= 's
+ $pkg =~ s/\>\=\s/\>\=/g;
+ $pkg =~ s/\s\>\=/\>\=/g;
+ $pkg =~ s/\=\s/\=/g;
+ $pkg =~ s/\s\=/\=/g;
+ my @req = split(/ /,$pkg);
+ foreach (@req) {
+ push_pkgconfig_buildreq("$_");
+
+ $restart = $restart + 1;
+ print " Adding pkgconfig($_) requirement\n";
+ }
+ }
+ if ($line2 =~ /which: no qmake/) {
+ $restart += 1;
+ push_pkgconfig_buildreq("Qt");
+ print " Adding Qt requirement\n";
+ }
+ if ($line2 =~ /Cannot find development files for any supported version of libnl/) {
+ $restart += 1;
+ push_pkgconfig_buildreq("libnl-1");
+ print " Adding libnl requirement\n";
+ }
+ if ($line2 =~ /<http:\/\/www.cmake.org>/) {
+ $restart += 1;
+ push(@buildreqs, "cmake");
+ print " Adding cmake requirement\n";
+ }
+ if ($line2 =~ /checking for (.*?)\.\.\. not_found/ || $line2 =~ /checking for (.*?)\.\.\. no/ || $line2 =~ /checking (.*?)\.\.\. no/) {
+ $pkg = $1;
+ while (($key,$value) = each %failed_commands) {
+ if ($pkg eq $key) {
+ push(@buildreqs, $value);
+ print " Adding $value requirement\n";
+ # NOTE(review): "$restart += $restart + 1" doubles the counter
+ # rather than incrementing; only >0 / >=1 tests are done on it
+ # downstream, so behavior is unaffected -- likely meant += 1
+ $restart += $restart + 1;
+ $mustrestart = 1;
+ }
+ }
+
+ }
+
+ if ($line2 =~ /checking for [a-zA-Z0-9\_]+ in (.*?)\.\.\. no/) {
+ $pkg = $1;
+ while (($key,$value) = each %failed_libs) {
+ if ($pkg eq $key) {
+ push(@buildreqs, $value);
+ print " Adding $value requirement\n";
+ $restart += $restart + 1;
+ $mustrestart = 1;
+ }
+ }
+
+ }
+
+ if ($line2 =~ /-- Could NOT find ([a-zA-Z0-9]+)/) {
+ $pkg = $1;
+ while (($key,$value) = each %failed_libs) {
+ if ($pkg eq $key) {
+ push(@buildreqs, $value);
+ print " Adding $value requirement\n";
+ $restart += $restart + 1;
+ $mustrestart = 1;
+ }
+ }
+
+ }
+
+ if ($line2 =~ /fatal error\: (.*)\: No such file or directory/) {
+ $pkg = $1;
+ while (($key,$value) = each %failed_headers) {
+ if ($pkg eq $key) {
+ push_pkgconfig_buildreq($value);
+ print " Adding $value requirement\n";
+ $restart += $restart + 1;
+ }
+ }
+
+ }
+ if ($line2 =~ /checking for UDEV\.\.\. no/) {
+ print " Adding pkgconfig(udev) requirement\n";
+ push_pkgconfig_buildreq("udev");
+ }
+ if ($line2 =~ /checking for Apache .* module support/) {
+ print " Adding pkgconfig(httpd-devel) requirement\n";
+ push(@buildreqs, "httpd-devel");
+ if ($rcount < 3) {
+ $restart = $restart + 1;
+ }
+ }
+ if ($line2 =~ /([a-zA-Z0-9\-\_]*)\: command not found/i) {
+ my $cmd = $1;
+ my $found = 0;
+
+ while (($key,$value) = each %failed_commands) {
+ if ($cmd eq $key) {
+ push(@buildreqs, $value);
+ print " Adding $value requirement\n";
+ $restart += $restart + 1;
+ $mustrestart = 1;
+ $found = 1;
+ }
+ }
+
+ if ($found < 1) {
+ print " Command $cmd not found!\n";
+ }
+ }
+ if ($line2 =~ /checking for.*in -ljpeg... no/) {
+ push(@buildreqs, "libjpeg-devel");
+ print " Adding libjpeg-devel requirement\n";
+ $restart = $restart + 1;
+ }
+ if ($line2 =~ /fatal error\: zlib\.h\: No such file or directory/) {
+ push(@buildreqs, "zlib-devel");
+ print " Adding zlib-devel requirement\n";
+ $restart = $restart + 1;
+ }
+ if ($line2 =~ /error\: xml2-config not found/) {
+ push_pkgconfig_buildreq("libxml-2.0");
+ print " Adding libxml2-devel requirement\n";
+ $restart = $restart + 1;
+ }
+ if ($line2 =~ /checking \"location of ncurses\.h file\"/) {
+ push(@buildreqs, "ncurses-devel");
+ print " Adding ncurses-devel requirement\n";
+ $restart = $restart + 1;
+ }
+ if (($line2 =~ / \/usr\/include\/python2\.6$/ || $line2 =~ / to compile python extensions/) && $done_python == 0) {
+ push(@buildreqs, "python-devel");
+ print " Adding python-devel requirement\n";
+ $restart = $restart + 1;
+ $done_python = 1;
+ }
+ if ($line2 =~ /error: must install xorg-macros 1.6/) {
+ push_pkgconfig_buildreq("xorg-macros");
+ print " Adding xorg-macros requirement\n";
+ $restart = $restart + 1;
+ }
+ # learn the real gettext domain from the install log when it differs
+ if ($line2 =~ /installing .*?.gmo as [a-zA-Z0-9\-\.\/\_]+?\/([a-zA-Z0-9\-\_\.]+)\.mo$/) {
+ my $loc = $1;
+ if ($loc eq $localename) {} else {
+ print " Changing localename from $localename to $loc\n";
+ $localename = $loc;
+ $restart = $restart + 1;
+ }
+ }
+
+ if ($infiles == 0 && $line2 =~ / Installed \(but unpackaged\) file\(s\) found\:/) {
+ $infiles = 1;
+ }
+ }
+ close(OUTPUTF);
+ if (@allfiles == 0 || $mustrestart > 0) {
+ if ($restart >= 1)
+ {
+ $rcount = $rcount + 1;
+ if ($rcount < 10) {
+ goto retry;
+ }
+ }
+ print "Build failed ... stopping here.\n";
+ exit(0);
+ }
+
+}
+
+# Final verification pass in OBS mode: commit the finished package and
+# run one more "osc build", appending any still-unpackaged files to
+# @allfiles.  Despite the name, only the build output is scraped here;
+# no retry/requirement logic is applied.  No-op when $oscmode is off.
+sub process_rpmlint {
+ my $infiles = 0;
+
+
+ if ($oscmode == 0) {
+ return;
+ }
+
+ print "Verifying package ....\n";
+
+ system("osc addremove &> /dev/null");
+ system("osc ci -m \"Final import by autospectacle\" &> /dev/null");
+
+ open(OUTPUTF, "osc build --no-verify $name.spec 2>&1 |");
+ while (<OUTPUTF>) {
+ my $line2 = $_;
+
+# print "line is $line2\n";
+ if ($infiles == 1 && $line2 =~ /RPM build errors/) {
+ $infiles = 2;
+ }
+ if ($infiles == 1 && $line2 =~ /^Building/) {
+ $infiles = 2;
+ }
+ if ($infiles == 1) {
+ $line2 =~ s/\s*//g;
+ push(@allfiles, $line2);
+ }
+ if ($infiles == 0 && $line2 =~ / Installed \(but unpackaged\) file\(s\) found\:/) {
+ $infiles = 1;
+ }
+ }
+ close(OUTPUTF);
+
+}
+
+# Derive the globals $name and $version from the tarball filename at the
+# end of the source URL ("<name>-<version>.tar.*"); dashes in the
+# version are normalized to underscores (rpm forbids '-' in versions).
+sub guess_name_from_url {
+ my ($bigurl) = @_;
+
+ # keep only the last path component of the URL
+ @spliturl = split(/\//, $bigurl);
+ while (@spliturl > 1) {
+ shift(@spliturl);
+ }
+ my $tarfile = $spliturl[0];
+
+ if ($tarfile =~ /(.*?)\-([0-9\.\-\~]+)[-\.].*?\.tar/) {
+ $name = $1;
+ $version = $2;
+ $version =~ s/\-/\_/g;
+ }
+}
+
+############################################################################
+#
+# Output functions
+#
+
+# Write the yaml header: Name/Version/Release, Group (the guessed group
+# only when all file-based guesses agreed on exactly one), a sanitized
+# Summary, the Description block, URL, Sources (with the version
+# substituted by %{version}), Patches, and the Configure/LocaleName/
+# Builder directives when known.  All output goes to OUTFILE.
+sub print_name_and_description
+{
+ my @lines;
+
+ print OUTFILE "Name : $name\n";
+ print OUTFILE "Version : $version\n";
+ print OUTFILE "Release : 1\n";
+
+ # remove dupes
+ undef %saw;
+ @saw{@groups} = ();
+ @out = sort keys %saw;
+
+ if (@out == 1) {
+ foreach (@out) {
+ print OUTFILE "Group : $_\n";
+ }
+ } else {
+ print OUTFILE "Group : $group\n";
+ }
+ #
+ # Work around spectacle bug
+ $summary =~ s/\:\s/ /g;
+ $summary =~ s/^([a-z])/\u$1/ig;
+ $summary =~ s/\@//g;
+ # spectacle limits Summary lines to 80 characters
+ $summary = substr($summary, 0, 79);
+
+ $summary =~ s/\.^//g;
+ if (length($summary) < 1) {
+ $summary = "TO BE FILLED IN";
+ }
+ #
+ print OUTFILE "Summary : $summary\n";
+ print OUTFILE "Description: |\n";
+
+ $description =~ s/&quot;/\"/g;
+ $description =~ s/\@//g;
+ @lines = split(/\n/, $description);
+ foreach (@lines) {
+ print OUTFILE " $_\n";
+ }
+ if (length($url)>1) {
+ print OUTFILE "URL : $url\n";
+ }
+
+ # remove dupes
+ undef %saw;
+ @saw{@sources} = ();
+ @out = sort keys %saw;
+
+ print OUTFILE "Sources : \n";
+ foreach (@out) {
+ $source = $_;
+ $source =~ s/$version/\%\{version\}/g;
+
+ print OUTFILE " - $source\n";
+ }
+
+ if (@patches > 0) {
+ print OUTFILE "Patches: \n";
+ foreach (@patches) {
+ my $patch = $_;
+ print OUTFILE " - $patch\n";
+ }
+ }
+
+ print OUTFILE "\n";
+ if (length($configure)>2) {
+ print OUTFILE "Configure : $configure\n";
+ }
+ if (length($localename) > 2) {
+ print OUTFILE "LocaleName : $localename\n";
+ }
+ if (length($builder) > 2) {
+ print OUTFILE "Builder : $builder\n";
+ }
+}
+
+# Write a minimal Makefile that hooks the package into the
+# packaging-tools common build machinery.  Overwrites any existing
+# Makefile in the current directory.
+sub write_makefile
+{
+ open(MAKEFILE, ">Makefile");
+
+ print MAKEFILE "PKG_NAME := $name\n";
+ print MAKEFILE "SPECFILE = \$(addsuffix .spec, \$(PKG_NAME))\n";
+ print MAKEFILE "YAMLFILE = \$(addsuffix .yaml, \$(PKG_NAME))\n";
+ print MAKEFILE "\n";
+ print MAKEFILE "include /usr/share/packaging-tools/Makefile.common\n";
+
+ close(MAKEFILE);
+}
+
+# Write an initial <name>.changes changelog entry stamped with today's
+# date (shelled out to date(1)) and the detected $version.
+sub write_changelog
+{
+ open(CHANGELOG, ">$name.changes");
+ $date = ` date +"%a %b %d %Y"`;
+ chomp($date);
+ print CHANGELOG "* $date - Autospectacle <autospectacle\@meego.com> - $version\n";
+ print CHANGELOG "- Initial automated packaging\n";
+ close(CHANGELOG);
+}
+
+# Regenerate <name>.yaml from the current global state (all print_*
+# helpers write to the OUTFILE handle opened here), then the Makefile
+# and changelog, re-run specify to turn the yaml into a spec, and -- in
+# OBS mode -- commit the result.  Called repeatedly by the build-retry
+# loop, so the old spec is removed first.
+sub write_yaml
+{
+ open(OUTFILE, ">$name.yaml");
+ print_name_and_description();
+ print_license();
+ print_pkgconfig();
+ print_buildreq();
+ print_files();
+ print_devel();
+ print_doc();
+ close(OUTFILE);
+
+ write_makefile();
+ write_changelog();
+
+ system("rm $name.spec 2>/dev/null");
+ system("specify &> /dev/null");
+ if ($oscmode > 0) {
+ system("osc addremove");
+ system("osc ci -m \"Import by autospectacle\" &> /dev/null");
+ }
+
+}
+
+# Emit a BitBake recipe <name>_<version>.bb in the current directory
+# from the collected globals: summary/description, @license and
+# %lic_files (LIC_FILES_CHKSUM), de-duplicated @buildreqs (DEPENDS),
+# @sources plus checksums (SRC_URI), and @inherits.
+sub write_bbfile
+{
+ open(BBFILE, ">${name}_$version.bb");
+ print BBFILE "SUMMARY = \"$summary\"\n";
+ print BBFILE "DESCRIPTION = \"$description\"\n";
+
+ print BBFILE "LICENSE = \"@license\"\n";
+ print BBFILE "LIC_FILES_CHKSUM = \"";
+ foreach (keys %lic_files) {
+ print BBFILE "file://" . basename($_) . ";md5=$lic_files{$_} \\\n";
+ }
+ print BBFILE "\"\n\n";
+
+ if (@license <= 0) {
+ print "Can NOT get license from package itself.\n";
+ print "Please update the license and license file manually.\n";
+ }
+
+ if (@buildreqs > 0) {
+ # de-dupe while preserving first-seen order
+ my %saw;
+ my @out = grep(!$saw{$_}++,@buildreqs);
+ print BBFILE "DEPENDS = \"@out\"\n\n";
+ };
+
+ print BBFILE 'PR = "r0"' . "\n\n";
+ print BBFILE "SRC_URI = \"";
+ foreach (@sources) {
+ print BBFILE "$_ \\\n";
+ }
+ print BBFILE "\"\n\n";
+ print BBFILE "SRC_URI[md5sum] = \"$md5sum\"\n";
+ print BBFILE "SRC_URI[sha256sum] = \"$sha256sum\"\n";
+
+ if (@inherits) {
+ print BBFILE "inherit ";
+ foreach (@inherits) {
+ print BBFILE "$_ ";
+ }
+ }
+
+ close(BBFILE);
+
+ my $curdir = `pwd`;
+ chomp($curdir);
+ print "Create bb file: $curdir/${name}_$version.bb\n";
+}
+
+# Compute the global $md5sum and $sha256sum of the downloaded tarball.
+# Note: ignores its arguments -- it overwrites @_ with the basename of
+# the global $dir (the source URL) and relies on the current working
+# directory containing the downloaded file.
+sub calculate_sums
+{
+ @_ = basename $dir;
+ my $md5output = `md5sum @_`;
+ $md5output =~ /^([a-zA-Z0-9]*) /;
+ $md5sum = $1;
+ chomp($md5sum);
+ my $sha256output = `sha256sum @_`;
+ $sha256output =~ /^([a-zA-Z0-9]*) /;
+ $sha256sum = $1;
+ chomp($sha256sum);
+}
+
+############################################################################
+#
+# Main program
+#
+
+# Main driver: argv[0] is the source tarball URL, remaining arguments
+# are patch names.  Downloads and unpacks the tarball, probes the source
+# tree (configure.ac, qmake .pro files, license files, descriptions)
+# and writes a BitBake recipe.  NOTE: the "exit 0" after write_bbfile()
+# makes everything from the OBS/yaml section onward unreachable -- that
+# is the retained tail of the original autospectacle flow.
+if ( @ARGV < 1 ) {
+ print "Usage: $0 <url-of-source-tarballs>\n";
+ exit(1);
+}
+
+if (@ARGV > 1) {
+ my $i = 1;
+ while ($i < @ARGV) {
+ my $patch = $ARGV[$i];
+ print "Adding patch $patch\n";
+ push(@patches, $patch);
+ $i++;
+ }
+}
+
+setup_licenses();
+setup_files_rules();
+setup_group_rules();
+setup_pkgconfig_ban();
+setup_failed_commands();
+
+# presence of an osc checkout switches on OBS mode
+if (-e ".osc/_packages") {
+ $oscmode = 1;
+}
+
+my $tmpdir = tempdir();
+
+$dir = $ARGV[0];
+guess_name_from_url($dir);
+push(@sources, $dir);
+
+#system("cd $tmpdir; curl -s -O $dir");
+$orgdir = `pwd`;
+chomp($orgdir);
+my $outputdir = $name;
+if (! $name) {
+ $outputdir = basename $dir;
+}
+mkpath($outputdir);
+chdir($outputdir);
+print "Downloading package: $dir\n";
+system("wget --quiet $dir") == 0 or die "Download $dir failed.";
+
+calculate_sums($outputdir);
+
+print "Unpacking to : $tmpdir\n";
+
+# normalize .tgz downloads to .tar.gz so the unpack globs below find them
+my @tgzfiles = <$orgdir/$outputdir/*.tgz>;
+foreach (@tgzfiles) {
+ my $tgz = basename $_;
+ my $tar = $tgz;
+ $tar =~ s/tgz/tar\.gz/g;
+ $dir =~ s/tgz/tar\.gz/g;
+ system("mv $orgdir/$outputdir/$tgz $orgdir/$outputdir/$tar");
+ guess_name_from_url($dir);
+}
+
+#
+# I really really hate the fact that meego deleted the -a option from tar.
+# this is a step backwards in time that is just silly.
+#
+
+
+system("cd $tmpdir; tar -jxf $orgdir/$outputdir/*\.tar\.bz2 &>/dev/null");
+system("cd $tmpdir; tar -zxf $orgdir/$outputdir/*\.tar\.gz &>/dev/null");
+print "Parsing content ....\n";
+# the tarball is expected to unpack to a single top-level directory
+my @dirs = <$tmpdir/*>;
+foreach (@dirs) {
+ $dir = $_;
+}
+
+$fulldir = $dir;
+
+if ( -e "$dir/autogen.sh" ) {
+ $configure = "autogen";
+ $uses_configure = 1;
+ push(@inherits, "autotools");
+}
+if ( -e "$dir/BUILD-CMAKE" ) {
+ $configure = "cmake";
+ push(@buildreqs, "cmake");
+ $uses_configure = 1;
+ push(@inherits, "cmake");
+}
+
+if ( -e "$dir/configure" ) {
+ $configure = "";
+}
+
+my @files = <$dir/configure.*>;
+
+my $findoutput = `find $dir -name "configure.ac" 2>/dev/null`;
+my @findlist = split(/\n/, $findoutput);
+foreach (@findlist) {
+ push(@files, $_);
+}
+foreach (@files) {
+ process_configure_ac("$_");
+}
+
+$findoutput = `find $dir -name "*.pro" 2>/dev/null`;
+@findlist = split(/\n/, $findoutput);
+foreach (@findlist) {
+ process_qmake_pro("$_");
+}
+
+if (-e "$dir/$name.pro") {
+ $builder = "qmake";
+ push_pkgconfig_buildreq("Qt");
+ push(@inherits, "qmake2");
+}
+
+
+#
+# This is a good place to generate configure.in
+#
+if (length($configure) > 2) {
+ if ($configure eq "autogen") {
+ system("cd $dir ; ./autogen.sh &> /dev/null");
+ }
+}
+@files = <$dir/configure>;
+foreach (@files) {
+ process_configure("$_");
+}
+
+if ($uses_configure == 0) {
+ $configure = "none";
+}
+
+@files = <$dir/COPY*>;
+foreach (@files) {
+ guess_license_from_file("$_");
+}
+
+@files = <$dir/LICENSE*>;
+foreach (@files) {
+ guess_license_from_file("$_");
+}
+
+
+@files = <$dir/GPL*>;
+foreach (@files) {
+ guess_license_from_file("$_");
+}
+
+
+guess_description($dir);
+
+#
+# Output of bbfile file
+#
+write_bbfile();
+chdir($orgdir);
+exit 0;
+
+#
+# Output of the yaml file  (unreachable: see exit 0 above)
+#
+
+
+if ($oscmode == 1) {
+ print "Creating OBS project $name ...\n";
+ system("osc mkpac $name &> /dev/null");
+ system("mkdir $name &> /dev/null");
+ chdir($name);
+ system("mv ../$name*\.tar\.* .");
+}
+
+write_yaml();
+print "Building package ....\n";
+
+if ($oscmode == 0) {
+ guess_files_from_rpmbuild();
+} else {
+ guess_files_from_oscbuild();
+}
+
+apply_files_rules();
+
+$printed_subpackages = 0;
+write_yaml();
+
+process_rpmlint();
+
+print "Spectacle creation complete.\n";
diff --git a/scripts/gen-site-config b/scripts/gen-site-config
new file mode 100755
index 000000000..7da7a0bd8
--- /dev/null
+++ b/scripts/gen-site-config
@@ -0,0 +1,53 @@
+#! /bin/sh
+# Copyright (c) 2005-2008 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# Emit a configure.ac on stdout that probes every type, function and header
+# listed in the fragment directory given as $1 (one entry per line in the
+# files "types", "funcs" and "headers" respectively).
+# NOTE(review): $1 is expanded unquoted throughout; a directory path
+# containing spaces will break -- confirm callers always pass safe paths.
+cat << EOF
+AC_PREREQ(2.57)
+AC_INIT([site_wide],[1.0.0])
+
+EOF
+
+# Disable as endian is set in the default config
+#echo AC_C_BIGENDIAN
+#echo
+
+# One AC_CHECK_SIZEOF per line of $1/types.
+if [ -e $1/types ] ; then
+ while read type ; do
+ echo "AC_CHECK_SIZEOF([$type])"
+ done < $1/types
+
+ echo
+fi
+
+# One AC_CHECK_FUNCS per line of $1/funcs.
+if [ -e $1/funcs ]; then
+ while read func ; do
+ echo "AC_CHECK_FUNCS([$func])"
+ done < $1/funcs
+
+ echo
+fi
+
+# One AC_CHECK_HEADERS per line of $1/headers.
+if [ -e $1/headers ]; then
+ while read header ; do
+ echo "AC_CHECK_HEADERS([$header])"
+ done < $1/headers
+
+ echo
+fi
+
+cat << EOF
+AC_OUTPUT
+EOF
diff --git a/scripts/help2man b/scripts/help2man
new file mode 100755
index 000000000..2bb8d868b
--- /dev/null
+++ b/scripts/help2man
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exit 1
diff --git a/scripts/hob b/scripts/hob
new file mode 100755
index 000000000..4a821cb08
--- /dev/null
+++ b/scripts/hob
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+export BB_ENV_EXTRAWHITE="DISABLE_SANITY_CHECKS $BB_ENV_EXTRAWHITE"
+DISABLE_SANITY_CHECKS=1 bitbake -u hob
+
+ret=$?
+exit $ret
diff --git a/scripts/jhbuild/jhbuild2oe.py b/scripts/jhbuild/jhbuild2oe.py
index ef292763d..9b31cafb6 100755
--- a/scripts/jhbuild/jhbuild2oe.py
+++ b/scripts/jhbuild/jhbuild2oe.py
@@ -161,9 +161,9 @@ class Handlers(object):
# create the package
d = bb.data.init()
pn = self.packagename(element.attrib.get('id'))
- bb.data.setVar('PN', pn, d)
+ d.setVar('PN', pn)
bb.data.setVar('DEPENDS', ' '.join(deps), d)
- bb.data.setVar('_handler', 'metamodule', d)
+ d.setVar('_handler', 'metamodule')
self.packages.append(d)
def autotools(self, element, parent):
@@ -181,23 +181,23 @@ class Handlers(object):
if id is None:
raise Exception('Error: autotools element has no id attribute.')
pn = self.packagename(id)
- bb.data.setVar('PN', pn, d)
+ d.setVar('PN', pn)
if deps is not None:
bb.data.setVar('DEPENDS', ' '.join(deps), d)
if branch is not None:
# <branch repo="git.freedesktop.org" module="xorg/xserver"/>
repo = os.path.join(self.repositories[branch.attrib.get('repo')], branch.attrib.get('module'))
- bb.data.setVar('SRC_URI', repo, d)
+ d.setVar('SRC_URI', repo)
checkoutdir = branch.attrib.get('checkoutdir')
if checkoutdir is not None:
bb.data.setVar('S', os.path.join('${WORKDIR}', checkoutdir), d)
# build class
- bb.data.setVar('INHERITS', 'autotools', d)
- bb.data.setVarFlag('INHERITS', 'operator', '+=', d)
- bb.data.setVar('_handler', 'autotools', d)
+ d.setVar('INHERITS', 'autotools')
+ d.setVarFlag('INHERITS', 'operator', '+=')
+ d.setVar('_handler', 'autotools')
self.packages.append(d)
class Emitter(object):
@@ -209,7 +209,7 @@ class Emitter(object):
def __init__(self, filefunc = None, basedir = None):
def _defaultfilefunc(package):
# return a relative path to the bitbake .bb which will be written
- return bb.data.getVar('PN', package, 1) + '.bb'
+ return package.getVar('PN', 1) + '.bb'
self.filefunc = filefunc or _defaultfilefunc
self.basedir = basedir or os.path.abspath(os.curdir)
@@ -226,16 +226,16 @@ class Emitter(object):
f.close()
for key in bb.data.keys(package):
- fdata = fdata.replace('@@'+key+'@@', bb.data.getVar(key, package))
+ fdata = fdata.replace('@@'+key+'@@', package.getVar(key))
else:
for key in bb.data.keys(package):
if key == '_handler':
continue
elif key == 'INHERITS':
- fdata += 'inherit %s\n' % bb.data.getVar('INHERITS', package)
+ fdata += 'inherit %s\n' % package.getVar('INHERITS')
else:
- oper = bb.data.getVarFlag(key, 'operator', package) or '='
- fdata += '%s %s "%s"\n' % (key, oper, bb.data.getVar(key, package))
+ oper = package.getVarFlag(key, 'operator') or '='
+ fdata += '%s %s "%s"\n' % (key, oper, package.getVar(key))
if not os.path.exists(os.path.join(self.basedir, os.path.dirname(self.filefunc(package)))):
os.makedirs(os.path.join(self.basedir, os.path.dirname(self.filefunc(package))))
@@ -254,8 +254,8 @@ def _test():
def filefunc(package):
# return a relative path to the bitbake .bb which will be written
- src_uri = bb.data.getVar('SRC_URI', package, 1)
- filename = bb.data.getVar('PN', package, 1) + '.bb'
+ src_uri = package.getVar('SRC_URI', 1)
+ filename = package.getVar('PN', 1) + '.bb'
if not src_uri:
return filename
else:
diff --git a/scripts/multilib_header_wrapper.h b/scripts/multilib_header_wrapper.h
new file mode 100644
index 000000000..5a8754088
--- /dev/null
+++ b/scripts/multilib_header_wrapper.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2005-2011 by Wind River Systems, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+/*
+ * Template wrapper header: the literal token ENTER_HEADER_FILENAME_HERE is
+ * substituted with the real header base name at install time, and this
+ * wrapper then selects the word-size-specific variant of that header at
+ * compile time via __WORDSIZE.  MIPS needs a further _MIPS_SIM check to
+ * tell the o32 and n32 ABIs apart (both report __WORDSIZE == 32).
+ */
+#include <bits/wordsize.h>
+
+#ifdef __WORDSIZE
+
+#if __WORDSIZE == 32
+
+#ifdef _MIPS_SIM
+
+#if _MIPS_SIM == _ABIO32
+#include <ENTER_HEADER_FILENAME_HERE-32.h>
+#elif _MIPS_SIM == _ABIN32
+#include <ENTER_HEADER_FILENAME_HERE-n32.h>
+#else
+#error "Unknown _MIPS_SIM"
+#endif
+
+#else /* _MIPS_SIM is not defined */
+#include <ENTER_HEADER_FILENAME_HERE-32.h>
+#endif
+
+#elif __WORDSIZE == 64
+#include <ENTER_HEADER_FILENAME_HERE-64.h>
+#else
+#error "Unknown __WORDSIZE detected"
+#endif /* matches #if __WORDSIZE == 32 */
+
+#else /* __WORDSIZE is not defined */
+
+#error "__WORDSIZE is not defined"
+
+#endif
+
diff --git a/scripts/native-intercept/chown b/scripts/native-intercept/chown
new file mode 100755
index 000000000..4f43271c2
--- /dev/null
+++ b/scripts/native-intercept/chown
@@ -0,0 +1,2 @@
+#! /bin/sh
+echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
new file mode 100755
index 000000000..32c0ba02d
--- /dev/null
+++ b/scripts/oe-buildenv-internal
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+# OE-Core Build Environment Setup Script
+#
+# Copyright (C) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# NOTE(review): this script uses bare "return", so it must be sourced (e.g.
+# from oe-init-build-env), never executed directly.
+# It is assumed OEROOT is already defined when this is called
+if [ -z "$OEROOT" ]; then
+ echo >&2 "Error: OEROOT is not defined!"
+ return 1
+fi
+
+# Determine the build directory: $BDIR from the environment wins, then the
+# first positional argument, then the default "build".
+if [ "x$BDIR" = "x" ]; then
+ if [ "x$1" = "x" ]; then
+ BDIR="build"
+ else
+ BDIR="$1"
+ if [ "$BDIR" = "/" ]; then
+ echo >&2 "Error: / is not supported as a build directory."
+ return 1
+ fi
+
+ # Remove any possible trailing slashes. This is used to work around
+ # buggy readlink in Ubuntu 10.04 that doesn't ignore trailing slashes
+ # and hence "readlink -f new_dir_to_be_created/" returns empty.
+ BDIR=`echo $BDIR | sed -re 's|/+$||'`
+
+ BDIR=`readlink -f "$BDIR"`
+ if [ -z "$BDIR" ]; then
+ PARENTDIR=`dirname "$1"`
+ echo >&2 "Error: the directory $PARENTDIR does not exist?"
+ return 1
+ fi
+ fi
+ # Optional second argument overrides the bitbake checkout location.
+ if [ "x$2" != "x" ]; then
+ BITBAKEDIR="$2"
+ fi
+fi
+# Make BUILDDIR absolute; expr tests whether BDIR already starts with "/".
+if expr "$BDIR" : '/.*' > /dev/null ; then
+ BUILDDIR="$BDIR"
+else
+ BUILDDIR="`pwd`/$BDIR"
+fi
+unset BDIR
+
+# Default to the in-tree bitbake checkout ($BBEXTRA allows a suffixed copy).
+if [ "x$BITBAKEDIR" = "x" ]; then
+ BITBAKEDIR="$OEROOT/bitbake$BBEXTRA/"
+fi
+
+# Resolve symlinks in both paths.
+BITBAKEDIR=`readlink -f "$BITBAKEDIR"`
+BUILDDIR=`readlink -f "$BUILDDIR"`
+
+if ! (test -d "$BITBAKEDIR"); then
+ echo >&2 "Error: The bitbake directory ($BITBAKEDIR) does not exist! Please ensure a copy of bitbake exists at this location"
+ return 1
+fi
+
+# Put our scripts and bitbake's bin directory ahead of the existing PATH.
+PATH="${OEROOT}/scripts:$BITBAKEDIR/bin/:$PATH"
+unset BITBAKEDIR
+
+# Used by the runqemu script
+export BUILDDIR
+export PATH
+# Whitelist of environment variables that bitbake lets through into builds.
+export BB_ENV_EXTRAWHITE="MACHINE DISTRO TCMODE TCLIBC http_proxy ftp_proxy https_proxy all_proxy ALL_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY SDKMACHINE BB_NUMBER_THREADS PARALLEL_MAKE GIT_PROXY_COMMAND GIT_PROXY_IGNORE SOCKS5_PASSWD SOCKS5_USER SCREENDIR"
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
new file mode 100755
index 000000000..b2c22aaf4
--- /dev/null
+++ b/scripts/oe-find-native-sysroot
@@ -0,0 +1,72 @@
+#!/bin/bash
+#
+# Find a native sysroot to use - either from an in-tree OE build or
+# from a toolchain installation. It then ensures the variable
+# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets
+# $PSEUDO to the path of the pseudo binary.
+#
+# This script is intended to be run within other scripts by source'ing
+# it, e.g:
+#
+# SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot`
+# . $SYSROOT_SETUP_SCRIPT
+#
+# This script will terminate execution of your calling program unless
+# you set a variable $SKIP_STRICT_SYSROOT_CHECK to a non-empty string
+# beforehand.
+#
+# Copyright (c) 2010 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# Only probe if the caller (or a toolchain env script) hasn't set it already.
+if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
+ BITBAKE=`which bitbake 2> /dev/null`
+ if [ "x$BITBAKE" != "x" ]; then
+ if [ "$UID" = "0" ]; then
+ # Root cannot run bitbake unless sanity checking is disabled
+ if [ ! -d "./conf" ]; then
+ echo "Error: root cannot run bitbake by default, and I cannot find a ./conf directory to be able to disable sanity checking"
+ exit 1
+ fi
+ # Temporarily creating conf/sanity.conf disables the sanity checks.
+ touch conf/sanity.conf
+ OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '=' -f2 | cut -d '"' -f2`
+ rm -f conf/sanity.conf
+ else
+ OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '=' -f2 | cut -d '"' -f2`
+ fi
+ else
+ echo "Error: Unable to locate your native sysroot."
+ echo "Did you forget to source the build environment setup script?"
+
+ # NOTE(review): when SKIP_STRICT_SYSROOT_CHECK is set we fall through
+ # with OECORE_NATIVE_SYSROOT still empty, so the pseudo path below
+ # resolves to /usr/bin/pseudo -- confirm this is intended.
+ if [ -z "$SKIP_STRICT_SYSROOT_CHECK" ]; then
+ exit 1
+ fi
+ fi
+fi
+
+# Set up pseudo command
+if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/pseudo" ]; then
+ echo "Error: Unable to find pseudo binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
+
+ if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
+ echo "Have you run 'bitbake meta-ide-support'?"
+ else
+ echo "This shouldn't happen - something is wrong with your toolchain installation"
+ fi
+
+ if [ -z "$SKIP_STRICT_SYSROOT_CHECK" ]; then
+ exit 1
+ fi
+fi
+PSEUDO="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
diff --git a/scripts/poky-git-proxy-command b/scripts/oe-git-proxy-command
index d31f85abd..d31f85abd 100755
--- a/scripts/poky-git-proxy-command
+++ b/scripts/oe-git-proxy-command
diff --git a/scripts/oe-git-proxy-socks-command b/scripts/oe-git-proxy-socks-command
new file mode 100755
index 000000000..8acffb524
--- /dev/null
+++ b/scripts/oe-git-proxy-socks-command
@@ -0,0 +1,23 @@
+#! /bin/bash
+# GIT_PROXY_COMMAND helper: tunnel git's connection through the SOCKS proxy
+# at $GIT_PROXY_HOST:$GIT_PROXY_PORT, preferring the oe-git-proxy-socks
+# binary and falling back to netcat's -x proxy support.
+SCRIPTDIR=`dirname $0`
+# Check oe-git-proxy-socks exists
+PROXYSOCKS=`which oe-git-proxy-socks 2> /dev/null`
+if [ -z "$PROXYSOCKS" -a -e "$SCRIPTDIR/oe-git-proxy-socks.c" ]; then
+ # If not try and build it
+ gcc $SCRIPTDIR/oe-git-proxy-socks.c -o $SCRIPTDIR/oe-git-proxy-socks
+fi
+# NOTE(review): the re-check uses "which" again, so a binary freshly built
+# into $SCRIPTDIR is only found if $SCRIPTDIR is on $PATH -- confirm.
+PROXYSOCKS=`which oe-git-proxy-socks 2> /dev/null`
+if [ ! -x "$PROXYSOCKS" ]; then
+ # If that fails, we can see if netcat (nc) is available
+ NETCAT=`which nc 2> /dev/null`
+ if [ ! -x "$NETCAT" ]; then
+ # If that fails, explain to the user
+ echo "Unable to find oe-git-proxy-socks. This is usually created with the command"
+ echo "'gcc scripts/oe-git-proxy-socks.c -o scripts/oe-git-proxy-socks' which we tried"
+ echo "but it doesn't seem to have worked. Please compile the binary manually."
+ echo "Alternativly, install nc (netcat) on this machine."
+ exit 1
+ fi
+ exec $NETCAT -x $GIT_PROXY_HOST:$GIT_PROXY_PORT "$@"
+fi
+oe-git-proxy-socks -S $GIT_PROXY_HOST:$GIT_PROXY_PORT $@
diff --git a/scripts/poky-git-proxy-socks.c b/scripts/oe-git-proxy-socks.c
index f5747117a..f5747117a 100644
--- a/scripts/poky-git-proxy-socks.c
+++ b/scripts/oe-git-proxy-socks.c
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
new file mode 100755
index 000000000..560453e09
--- /dev/null
+++ b/scripts/oe-setup-builddir
@@ -0,0 +1,126 @@
+#!/bin/sh
+
+# OE Build Environment Setup Script
+#
+# Copyright (C) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# BUILDDIR is expected to be exported by oe-buildenv-internal beforehand.
+if [ -z "$BUILDDIR" ]; then
+ echo >&2 "Error: The build directory (BUILDDIR) must be set!"
+ exit 1
+fi
+
+mkdir -p $BUILDDIR/conf
+
+if ! (test -d "$BUILDDIR"); then
+ echo >&2 "Error: The builddir ($BUILDDIR) does not exist!"
+ exit 1
+fi
+
+if ! (test -w "$BUILDDIR"); then
+ echo >&2 "Error: Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
+ exit 1
+fi
+
+cd "$BUILDDIR"
+
+TEMPLATECONF=${TEMPLATECONF:-meta/conf}
+
+#
+# $TEMPLATECONF can point to a directory for the template local.conf & bblayers.conf
+#
+if [ "x" != "x$TEMPLATECONF" ]; then
+ if ! (test -d "$TEMPLATECONF"); then
+ # Allow TEMPLATECONF=meta-xyz/conf as a shortcut
+ if [ -d "$OEROOT/$TEMPLATECONF" ]; then
+ TEMPLATECONF="$OEROOT/$TEMPLATECONF"
+ fi
+ if ! (test -d "$TEMPLATECONF"); then
+ echo >&2 "Error: '$TEMPLATECONF' must be a directory containing local.conf & bblayers.conf"
+ # NOTE(review): bare "return" only works when this file is sourced;
+ # every other error path here uses "exit 1" -- confirm intended use.
+ return
+ fi
+ fi
+ OECORELAYERCONF="$TEMPLATECONF/bblayers.conf.sample"
+ OECORELOCALCONF="$TEMPLATECONF/local.conf.sample"
+fi
+
+# Fall back to the in-tree sample local.conf and install it on first run.
+if [ "x" = "x$OECORELOCALCONF" ]; then
+ OECORELOCALCONF="$OEROOT/meta/conf/local.conf.sample"
+fi
+if ! (test -r "$BUILDDIR/conf/local.conf"); then
+cat <<EOM
+You had no conf/local.conf file. This configuration file has therefore been
+created for you with some default values. You may wish to edit it to use a
+different MACHINE (target hardware) or enable parallel build options to take
+advantage of multiple cores for example. See the file for more information as
+common configuration options are commented.
+
+The Yocto Project has extensive documentation about OE including a reference manual
+which can be found at:
+ http://yoctoproject.org/documentation
+
+For more information about OpenEmbedded see their website:
+ http://www.openembedded.org/
+
+EOM
+ cp -f $OECORELOCALCONF $BUILDDIR/conf/local.conf
+fi
+
+# Same first-run treatment for bblayers.conf.
+if [ "x" = "x$OECORELAYERCONF" ]; then
+ OECORELAYERCONF="$OEROOT/meta/conf/bblayers.conf.sample"
+fi
+if ! (test -r "$BUILDDIR/conf/bblayers.conf"); then
+cat <<EOM
+You had no conf/bblayers.conf file. The configuration file has been created for
+you with some default values. To add additional metadata layers into your
+configuration please add entries to this file.
+
+The Yocto Project has extensive documentation about OE including a reference manual
+which can be found at:
+ http://yoctoproject.org/documentation
+
+For more information about OpenEmbedded see their website:
+ http://www.openembedded.org/
+
+
+EOM
+
+ # Put the absolute path to the layers in bblayers.conf so we can run
+ # bitbake without the init script after the first run
+ sed "s|##COREBASE##|$OEROOT|g" $OECORELAYERCONF > $BUILDDIR/conf/bblayers.conf
+fi
+
+# Prevent disturbing a new GIT clone in same console
+unset OECORELOCALCONF
+unset OECORELAYERCONF
+
+cat <<EOM
+
+### Shell environment set up for builds. ###
+
+You can now run 'bitbake <target>'
+
+Common targets are:
+ core-image-minimal
+ core-image-sato
+ meta-toolchain
+ meta-toolchain-sdk
+ adt-installer
+ meta-ide-support
+
+You can also run generated qemu images with a command like 'runqemu qemux86'
+
+EOM
diff --git a/scripts/oe-setup-rpmrepo b/scripts/oe-setup-rpmrepo
new file mode 100755
index 000000000..ea885f632
--- /dev/null
+++ b/scripts/oe-setup-rpmrepo
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# This utility sets up the necessary metadata for an rpm repo
+#
+# Copyright (c) 2011 Intel Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+# Don't use TMPDIR from the external environment, it may be a distro
+# variable pointing to /tmp (e.g. within X on OpenSUSE)
+# Instead, use OE_TMPDIR for passing this in externally.
+TMPDIR="$OE_TMPDIR"
+
+function usage() {
+ echo "Usage: $0 <rpm-dir>"
+ echo " <rpm-dir>: default is $TMPDIR/deploy/rpm"
+}
+
+if [ $# -gt 1 ]; then
+ usage
+ exit 1
+fi
+
+# Resolve TMPDIR via "bitbake -e" when it wasn't supplied through OE_TMPDIR.
+setup_tmpdir() {
+ if [ -z "$TMPDIR" ]; then
+ # Try to get TMPDIR from bitbake
+ type -P bitbake &>/dev/null || {
+ echo "In order for this script to dynamically infer paths";
+ echo "to kernels or filesystem images, you either need";
+ echo "bitbake in your PATH or to source oe-init-build-env";
+ echo "before running this script" >&2;
+ exit 1; }
+
+ # We have bitbake in PATH, get TMPDIR from bitbake
+ TMPDIR=`bitbake -e | grep ^TMPDIR=\" | cut -d '=' -f2 | cut -d '"' -f2`
+ if [ -z "$TMPDIR" ]; then
+ echo "Error: this script needs to be run from your build directory,"
+ echo "or you need to explicitly set TMPDIR in your environment"
+ exit 1
+ fi
+ fi
+}
+
+setup_sysroot() {
+ # Toolchain installs set up $OECORE_NATIVE_SYSROOT in their
+ # environment script. If that variable isn't set, we're
+ # either in an in-tree poky scenario or the environment
+ # script wasn't source'd.
+ if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
+ setup_tmpdir
+ BUILD_ARCH=`uname -m`
+ BUILD_OS=`uname | tr '[A-Z]' '[a-z]'`
+ BUILD_SYS="$BUILD_ARCH-$BUILD_OS"
+
+ OECORE_NATIVE_SYSROOT=$TMPDIR/sysroots/$BUILD_SYS
+ fi
+}
+
+setup_tmpdir
+setup_sysroot
+
+
+# The rpm deploy directory defaults to $TMPDIR/deploy/rpm.
+if [ -n "$1" ]; then
+ RPM_DIR="$1"
+else
+ RPM_DIR="$TMPDIR/deploy/rpm"
+fi
+
+if [ ! -d "$RPM_DIR" ]; then
+ echo "Error: rpm dir $RPM_DIR doesn't exist"
+ exit 1
+fi
+
+# createrepo comes from the native sysroot (built via createrepo-native).
+CREATEREPO=$OECORE_NATIVE_SYSROOT/usr/bin/createrepo
+if [ ! -e "$CREATEREPO" ]; then
+ echo "Error: can't find createrepo binary"
+ echo "please run bitbake createrepo-native first"
+ exit 1
+fi
+
+
+$CREATEREPO "$RPM_DIR"
+
+exit 0
diff --git a/scripts/oe-trim-schemas b/scripts/oe-trim-schemas
new file mode 100755
index 000000000..29fb3a1b6
--- /dev/null
+++ b/scripts/oe-trim-schemas
@@ -0,0 +1,49 @@
+#! /usr/bin/env python
+# Strip translatable documentation from a GConf schemas file: remove the
+# top-level <short>/<long> descriptions and all non-C locales, then write
+# the trimmed XML to stdout.  Usage: oe-trim-schemas <schemas-file>
+
+import sys
+try:
+ import xml.etree.cElementTree as etree
+except:
+ import xml.etree.ElementTree as etree
+
+# Return the first direct child of elem whose tag is name, or None.
+# NOTE(review): getchildren() is deprecated in newer ElementTree releases
+# in favour of list(elem) -- fine for the Python this targets.
+def child (elem, name):
+ for e in elem.getchildren():
+ if e.tag == name:
+ return e
+ return None
+
+# Return elem's direct children, optionally filtered by tag name.
+def children (elem, name=None):
+ l = elem.getchildren()
+ if name:
+ l = [e for e in l if e.tag == name]
+ return l
+
+xml = etree.parse(sys.argv[1])
+
+# Walk every schema under <schemalist>, dropping its descriptions and
+# pruning locales.
+for schema in child(xml.getroot(), "schemalist").getchildren():
+ e = child(schema, "short")
+ if e is not None:
+ schema.remove(e)
+
+ e = child(schema, "long")
+ if e is not None:
+ schema.remove(e)
+
+ for locale in children(schema, "locale"):
+ # One locale must exist so leave C locale...
+ a = locale.attrib.get("name")
+ if a == 'C':
+ continue
+ # Locales that carry a <default> are kept but lose their text;
+ # otherwise drop the locale entirely.
+ e = child(locale, "default")
+ if e is None:
+ schema.remove(locale)
+ else:
+ e = child(locale, "short")
+ if e is not None:
+ locale.remove(e)
+ e = child(locale, "long")
+ if e is not None:
+ locale.remove(e)
+
+xml.write(sys.stdout, "UTF-8")
+
diff --git a/scripts/poky-chroot-run b/scripts/poky-chroot-run
deleted file mode 100755
index f1f4dec6a..000000000
--- a/scripts/poky-chroot-run
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-#
-# Runs a command within a Poky chroot
-#
-
-XEPHYR=`which Xephyr`
-if [ ! -n "$XEPHYR" -o ! -x "$XEPHYR" ]; then
- echo "You need to install Xephyr to use $0"
- exit 1
-fi
-
-CHROOTUID=`which chrootuid`
-if [ ! -n "$CHROOTUID" -o ! -x "$CHROOTUID" ]; then
- echo "You need to install Xephyr to use $0"
- exit 1
-fi
-
-
-case $# in
- 0)
- echo "Invalid arguments."
- echo "$ $0 <target> [command]"
- exit 1
- ;;
- 1)
- ROOTFS=$1
- shift
- # Set $1 to be the boot script
- set -- /usr/bin/poky-chroot-launch
- ;;
- *)
- ROOTFS=$1
- shift
- # Now $1 onwards are the command and arguments to run
- ;;
-esac
-
-test -f "$ROOTFS/.pokychroot" || { echo "$ROOTFS is not setup for use as a Poky chroot." ; exit 1 ;}
-
-set -e
-
-# chrootuid doesn't handle relative paths, so ensure that the rootfs path is
-# absolute
-if test ${ROOTFS:0:1} != /; then
- ROOTFS="$(pwd)/$ROOTFS"
-fi
-
-safe_mount() {
- if ! mountpoint -q "$ROOTFS/$1"; then
- sudo mount --bind $1 "$ROOTFS/$1"
- fi
-}
-safe_umount() {
- if mountpoint -q "$ROOTFS/$1"; then
- sudo umount "$ROOTFS/$1"
- fi
-}
-
-# Mount the directories we need
-for m in /dev /dev/pts /dev/shm /proc /sys /tmp; do
- safe_mount $m
-done
-
-# Set up the environment
-export PATH=/bin:/usr/bin:/sbin:/usr/sbin
-export HOME=/home/$USER
-
-if [ ! -f "$ROOTFS/.pokychroot.init" ]; then
- sudo $CHROOTUID -i "$ROOTFS" $USER /bin/sh -c "/usr/bin/poky-chroot-init"
- touch "$ROOTFS/.pokychroot.init"
-fi
-
-$XEPHYR :1 -ac -screen 640x480x16 &
-
-# Go go go!
-sudo $CHROOTUID -i "$ROOTFS" $USER "$@" || /bin/true
-
-# Trap term signals so we don't kill ourselves
-trap true TERM
-# send term signal to the process group
-kill -- -$$
-
-# Unmount TODO: only umount if there are no other sessions active, somehow.
-for m in /tmp /sys /proc /dev/shm /dev/pts /dev; do
- safe_umount $m
-done
diff --git a/scripts/poky-chroot-setup b/scripts/poky-chroot-setup
deleted file mode 100755
index 209efd6df..000000000
--- a/scripts/poky-chroot-setup
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-#
-# Script to extract a poky qemux86 rootfs and prepare it for
-# use as a chroot
-#
-
-set -e
-
-case $# in
- 2)
- TGZ=$1
- TARGET=$2
- ;;
- *)
- echo "Invalid arguments, please run as:"
- echo "$ $0 <qemux86-rootfs.tar.gz> <target-directory>"
- exit 1
-esac
-
-echo "Extracting $TGZ into $TARGET"
-
-test -d "$TARGET" && { echo "$TARGET already exists, please remove and retry or specify a dirferent directory." ; exit 1 ; }
-mkdir --parents "$TARGET"
-
-tar -C "$TARGET" --exclude ./dev/\* -jxp -f "$TGZ"
-echo "HAVE_TOUCHSCREEN=0" >> "$TARGET/etc/formfactor/machconfig"
-echo "DISPLAY_WIDTH_PIXELS=640" >> "$TARGET/etc/formfactor/machconfig"
-echo "DISPLAY_HEIGHT_PIXELS=480" >> "$TARGET/etc/formfactor/machconfig"
-cp /etc/passwd "$TARGET/etc/passwd"
-cp /etc/resolv.conf "$TARGET/etc/resolv.conf"
-touch "$TARGET/.pokychroot"
diff --git a/scripts/poky-env-internal b/scripts/poky-env-internal
deleted file mode 100755
index 4c87e16f6..000000000
--- a/scripts/poky-env-internal
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/bin/sh
-
-# Poky Build Enviroment Setup Script
-#
-# Copyright (C) 2006-2007 OpenedHand Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-# Change this to the location of this file.
-# Also update the locations at the top of conf/local.conf
-
-OEROOT=`pwd`
-if [ "x$BDIR" = "x" ]; then
- if [ "x$1" = "x" ]; then
- BDIR="build"
- else
- BDIR="$1"
- fi
-fi
-BUILDDIR="$OEROOT/$BDIR/"
-unset BDIR
-BITBAKEDIR="$OEROOT/bitbake$BBEXTRA/"
-PKGDIR="$OEROOT/meta/"
-BBPATH="$BITBAKEDIR $PKGDIR"
-
-#
-# Add in any extra meta-* repositories to BBPATH
-#
-METAREPOS=`cd $OEROOT; find . -maxdepth 1 -name 'meta-*' -type d| sed -e 's#./##g'`
-for repo in $METAREPOS
-do
- # meta-openmoko is disabled by default - see local.conf.sample to enable it
- if [ $repo = "meta-openmoko" ]; then
- continue
- fi
- # meta-extras is disabled by default - see local.conf.sample to enable it
- if [ $repo = "meta-extras" ]; then
- continue
- fi
- if [ -e $OEROOT/$repo/poky-extra-environment ]; then
- . $OEROOT/$repo/poky-extra-environment
- fi
- BBPATH=" $BBPATH $OEROOT/$repo"
-done
-
-BBPATH="$BBPATH $HOME/.oe $HOME/.poky $BUILDDIR"
-
-#
-# Remove any symlinks from BBPATH
-#
-NEWPATH=""
-for p in $BBPATH
-do
- p2=`readlink -f "$p"`
- NEWPATH="$p2:$NEWPATH"
-done
-BBPATH="$NEWPATH"
-
-
-MSG=''
-
-BUILD_ARCH=`uname -m`
-BUILD_OS=`uname | tr '[A-Z]' '[a-z]'`
-BUILD_SYS="$BUILD_ARCH-$BUILD_OS"
-
-PATH="$BITBAKEDIR/bin/:$OEROOT/scripts:$PATH"
-
-cd "$BUILDDIR"
-
-# Remove any symlinks from paths
-BITBAKEDIR=`readlink -f "$BITBAKEDIR"`
-PKGDIR=`readlink -f "$PKGDIR"`
-BUILDDIR=`readlink -f "$BUILDDIR"`
-
-if ! (test -d "$BITBAKEDIR" && test -d "$PKGDIR" && test -d "$BUILDDIR"); then
- echo >&2 "Error: Not all directories exist! Did you run this script in poky directory?"
- return
-fi
-
-if [ "x" = "x$POKYLOCALCONF" ]; then
- POKYLOCALCONF="$BUILDDIR/conf/local.conf.sample"
-fi
-
-if ! (test -r "$BUILDDIR/conf/local.conf"); then
- cp -f $POKYLOCALCONF $BUILDDIR/conf/local.conf
-fi
-
-export BBPATH OEROOT BUILD_SYS BUILDDIR
-
-# Kill off the TERMINFO variable, as glibc will grab its contents in its 'make
-# install' if set
-unset TERMINFO
-# kill this so we don't link non-arm libraries
-unset LD_LIBRARY_PATH
-# Don't export TARGET_ARCH - it *will* cause build failures
-export PATH LD_LIBRARY_PATH OEROOT
-# Stop multi byte characters breaking the patcher stuff - This is for Redhat / Fedora people really
-export LANG=C
-# Kill PYTHONPATH as otherwise the Python specified there will conflict with
-# python-native.
-unset PYTHONPATH
-# Don't allow custom GConf schema paths to get into the system
-unset GCONF_SCHEMA_INSTALL_SOURCE
-
-echo
-echo "### Shell environment set up for Poky builds. ###"
-echo
-
-export BB_ENV_EXTRAWHITE="MACHINE DISTRO POKYMODE POKYLIBC OEROOT http_proxy ftp_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY"
diff --git a/scripts/poky-git-proxy-socks-command b/scripts/poky-git-proxy-socks-command
deleted file mode 100755
index a5af2d33f..000000000
--- a/scripts/poky-git-proxy-socks-command
+++ /dev/null
@@ -1,2 +0,0 @@
-#! /bin/bash
-poky-git-proxy-socks -S $GIT_PROXY_HOST:$GIT_PROXY_PORT $@
diff --git a/scripts/poky-nokia800-flashutil b/scripts/poky-nokia800-flashutil
deleted file mode 100755
index f1ffa5ae6..000000000
--- a/scripts/poky-nokia800-flashutil
+++ /dev/null
@@ -1,64 +0,0 @@
-#! /bin/sh
-# Copyright (C) 2008 OpenedHand Ltd.
-# Contact: andrew@openedhand.com
-#
-# Erase the partition given in $3 (default: rootfs) and flash the contents
-# of image given in $1 into the image $2.
-
-if [ ! -r "$1" ]; then
- echo "Usage: $0 <image> <destimage> [<partition>]"
- exit -1
-fi
-
-uboot_offset=0
-config_offset=64
-kernel_offset=256
-initfs_offset=1280
-rootfs_offset=2304 # chinook
-
-# This value should be selected for Diablo based firmwares
-# It also require patching qemu to get proper size of flash partitions
-# (by default qemu has Chinook split).
-#rootfs_offset=3328 # diablo
-
-if [ ! -e "$2" ]; then
- echo "foo"
- # Making an empty/erased flash image. Need a correct echo behavior.
- dd if=/dev/zero of=$2 bs=268435456 count=0 seek=1
- bash -c 'echo -en \\0377\\0377\\0377\\0377\\0377\\0377\\0377\\0377 > .8b'
- cat .8b .8b > .16b # OOB is 16 bytes
- cat .16b .16b .16b .16b .16b .16b .16b .16b > .8sec
- cat .8sec .8sec .8sec .8sec .8sec .8sec .8sec .8sec > .64sec
- cat .64sec .64sec .64sec .64sec .64sec .64sec .64sec .64sec > .512sec
- cat .512sec .512sec .512sec .512sec > .2ksec
- cat .2ksec .2ksec .2ksec .2ksec .2ksec .2ksec .2ksec .2ksec > .16k
- cat .16k .16k .16k .16k .16k .16k .16k .16k > .128k
- # N800 NAND is 512k sectors big
- cat .128k .128k .128k .128k >> $2
- rm -rf .8b .16b .8sec .64sec .512sec .2ksec .16k .128k
-fi
-
-if [ "$3" != "" ]; then
- case "$3" in
- config)
- partition=/dev/mtd1
- page=$config_offset
- ;;
- initfs)
- partition=/dev/mtd3
- page=$initfs_offset
- ;;
- rootfs)
- partition=/dev/mtd4
- page=$rootfs_offset
- ;;
- *)
- echo "Unknown partition $2"
- exit -1
- esac
-else
- partition=/dev/mtd4
- page=$rootfs_offset
-fi
-
-dd if=$1 of=$2 conv=notrunc bs=2048 seek=$page
diff --git a/scripts/poky-qemu b/scripts/poky-qemu
deleted file mode 100755
index be070d702..000000000
--- a/scripts/poky-qemu
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-
-# Handle running Poky images standalone with QEMU
-#
-# Copyright (C) 2006-2007 OpenedHand Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-if [ "x$1" = "x" ]; then
- MYNAME=`basename $0`
- echo "Run as MACHINE=xyz $MYNAME ZIMAGE IMAGEFILE [OPTIONS]"
- echo "where:"
- echo " ZIMAGE - the kernel image file to use"
- echo " IMAGEFILE - the image file/location to use"
- echo " (NFS booting assumed if IMAGEFILE not specified)"
- echo " MACHINE=xyz - the machine name (optional, autodetected from ZIMAGE if unspecified)"
- echo " OPTIONS - extra options to pass to QEMU"
- exit 1
-else
- ZIMAGE=$1
- shift
-fi
-
-if [ "x$MACHINE" = "x" ]; then
- MACHINE=`basename $ZIMAGE | sed -r -e 's#.*-([a-z]+[0-9]*)-?[0-9]*..*#\1#'`
-fi
-
-if [ "x$1" = "x" ]; then
- TYPE="nfs"
-else
- TYPE="ext3"
- if [ "$MACHINE" = "akita" ]; then
- TYPE="jffs2"
- fi
- if [ "$MACHINE" = "spitz" ]; then
- TYPE="ext3"
- fi
- if [ "$MACHINE" = "nokia800" ]; then
- TYPE="jffs2"
- fi
- if [ "$MACHINE" = "nokia800-maemo" ]; then
- TYPE="jffs2"
- fi
- HDIMAGE=$1
- shift
-fi
-
-INTERNAL_SCRIPT=`which poky-qemu-internal`
-
-. $INTERNAL_SCRIPT
diff --git a/scripts/poky-qemu-ifdown b/scripts/poky-qemu-ifdown
deleted file mode 100755
index 221235311..000000000
--- a/scripts/poky-qemu-ifdown
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-# QEMU network interface configuration script
-#
-# Copyright (C) 2006-2007 OpenedHand Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-IFCONFIG=`which ifconfig`
-if [ "x$IFCONFIG" = "x" ]; then
- # better than nothing...
- IFCONFIG=/sbin/ifconfig
-fi
-
-$IFCONFIG tap0 down
diff --git a/scripts/poky-qemu-ifup b/scripts/poky-qemu-ifup
deleted file mode 100755
index cb1891e27..000000000
--- a/scripts/poky-qemu-ifup
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-# QEMU network interface configuration script
-#
-# Copyright (C) 2006-2007 OpenedHand Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-IFCONFIG=`which ifconfig`
-if [ "x$IFCONFIG" = "x" ]; then
- # better than nothing...
- IFCONFIG=/sbin/ifconfig
-fi
-
-$IFCONFIG tap0 192.168.7.1
-
-# setup NAT for tap0 interface to have internet access in QEMU
-IPTABLES=`which iptables`
-if [ "x$IPTABLES" = "x" ]; then
- IPTABLES=/sbin/iptables
-fi
-
-$IPTABLES -A POSTROUTING -t nat -j MASQUERADE -s 192.168.7.0/24
-echo 1 > /proc/sys/net/ipv4/ip_forward
-$IPTABLES -P FORWARD ACCEPT
diff --git a/scripts/poky-qemu-internal b/scripts/poky-qemu-internal
deleted file mode 100755
index 685d8d7a2..000000000
--- a/scripts/poky-qemu-internal
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/bin/bash
-
-# Handle running Poky images under qemu
-#
-# Copyright (C) 2006-2008 OpenedHand Ltd.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-
-# Call setting:
-# QEMU_MEMORY (optional) - set the amount of memory in the emualted system.
-# SERIAL_LOGFILE (optional) - log the serial port output to a file
-# CROSSPATH - the path to any cross toolchain to use with distcc
-#
-# Image options:
-# MACHINE - the machine to run
-# TYPE - the image type to run
-# ZIMAGE - the kernel image file to use
-# HDIMAGE - the disk image file to use
-#
-
-if [ -z "$QEMU_MEMORY" ]; then
- case "$MACHINE" in
- "qemux86")
- QEMU_MEMORY="128M"
- ;;
- *)
- QEMU_MEMORY="64M"
- ;;
- esac
-
-fi
-
-QEMUIFUP=`which poky-qemu-ifup`
-QEMUIFDOWN=`which poky-qemu-ifdown`
-KERNEL_NETWORK_CMD="ip=192.168.7.2::192.168.7.1:255.255.255.0"
-QEMU_TAP_CMD="-net tap,vlan=0,ifname=tap0,script=$QEMUIFUP,downscript=$QEMUIFDOWN"
-QEMU_NETWORK_CMD="-net nic,vlan=0 $QEMU_TAP_CMD"
-KERNCMDLINE="mem=$QEMU_MEMORY"
-
-SERIALOPTS=""
-if [ "x$SERIAL_LOGFILE" != "x" ]; then
- SERIALOPTS="-serial file:$SERIAL_LOGFILE"
-fi
-
-case "$MACHINE" in
- "qemuarm") ;;
- "qemuarmv6") ;;
- "qemuarmv7") ;;
- "qemux86") ;;
- "akita") ;;
- "spitz") ;;
- "nokia800") ;;
- "nokia800-maemo") ;;
- *)
- echo "Error: Unsupported machine type $MACHINE"
- return
- ;;
-esac
-
-if [ "$TYPE" != "nfs" -a ! -f "$HDIMAGE" ]; then
- echo "Error: Image file $HDIMAGE doesn't exist"
- return
-fi
-
-if [ ! -f "$ZIMAGE" ]; then
- echo "Error: Kernel image file $ZIMAGE doesn't exist"
- return
-fi
-
-if [ -e /proc/sys/vm/mmap_min_addr ]; then
- if [ `cat /proc/sys/vm/mmap_min_addr` != "0" ]; then
- echo "Error, please set /proc/sys/vm/mmap_min_addr to 0 since otherwise it can cause problems with QEMU"
- return
- fi
-fi
-
-if [ "$MACHINE" = "qemuarm" -o "$MACHINE" = "qemuarmv6" -o "$MACHINE" = "qemuarmv7" ]; then
- QEMU=qemu-system-arm
- if [ "$TYPE" = "ext3" ]; then
- KERNCMDLINE="root=/dev/sda console=ttyAMA0 console=tty0 $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M versatilepb -hda $HDIMAGE -usb -usbdevice wacom-tablet -no-reboot"
- fi
- if [ "$TYPE" = "nfs" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- HDIMAGE=/srv/nfs/qemuarm
- fi
- if [ ! -d "$HDIMAGE" ]; then
- echo "Error: NFS mount point $HDIMAGE doesn't exist"
- return
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=192.168.7.1:$HDIMAGE rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M versatilepb -usb -usbdevice wacom-tablet -no-reboot"
- fi
- if [ "$MACHINE" = "qemuarmv6" ]; then
- QEMUOPTIONS="$QEMUOPTIONS -cpu arm1136"
- fi
- if [ "$MACHINE" = "qemuarmv7" ]; then
- QEMUOPTIONS="$QEMUOPTIONS -cpu cortex-a8"
- fi
-fi
-
-if [ "$MACHINE" = "qemux86" ]; then
- QEMU=qemu
- if [ "$TYPE" = "ext3" ]; then
- KERNCMDLINE="vga=0 root=/dev/hda mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
- QEMUOPTIONS="-vga vmware $QEMU_NETWORK_CMD -hda $HDIMAGE -usb -usbdevice wacom-tablet -enable-gl"
- fi
- if [ "$TYPE" = "nfs" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- HDIMAGE=/srv/nfs/qemux86
- fi
- if [ ! -d "$HDIMAGE" ]; then
- echo "Error: NFS mount point $HDIMAGE doesn't exist."
- return
- fi
- KERNCMDLINE="root=/dev/nfs nfsroot=192.168.7.1:$HDIMAGE rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
- QEMUOPTIONS="-vga std -usb -usbdevice wacom-tablet $QEMU_NETWORK_CMD"
- fi
-fi
-
-if [ "$MACHINE" = "spitz" ]; then
- QEMU=qemu-system-arm
- if [ "$TYPE" = "ext3" ]; then
- echo $HDIMAGE
- HDIMAGE=`readlink -f $HDIMAGE`
- echo $HDIMAGE
- if [ ! -e "$HDIMAGE.qemudisk" ]; then
- echo "Adding a partition table to the ext3 image for use by QEMU, please wait..."
- poky-addptable2image $HDIMAGE $HDIMAGE.qemudisk
- fi
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M spitz -hda $HDIMAGE.qemudisk -portrait"
- fi
-fi
-
-if [ "$MACHINE" = "akita" ]; then
- QEMU=qemu-system-arm
- if [ "$TYPE" = "jffs2" ]; then
- HDIMAGE=`readlink -f $HDIMAGE`
- if [ ! -e "$HDIMAGE.qemuflash" ]; then
- echo "Converting raw image into flash image format for use by QEMU, please wait..."
- raw2flash.akita < $HDIMAGE > $HDIMAGE.qemuflash
- fi
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M akita -mtdblock $HDIMAGE.qemuflash -portrait"
- fi
-fi
-
-if [ "$MACHINE" = "nokia800" ]; then
- QEMU=qemu-system-arm
- if [ "$TYPE" = "jffs2" ]; then
- HDIMAGE=`readlink -f $HDIMAGE`
- if [ ! -e "$HDIMAGE.qemuflash" ]; then
- echo "'Flashing' rootfs, please wait..."
- poky-nokia800-flashutil $HDIMAGE $HDIMAGE.qemuflash
- fi
- KERNCMDLINE="root=/dev/mtdblock4 rootfstype=jffs2"
- QEMU_NETWORK_CMD="-net nic,model=usb,vlan=0 $QEMU_TAP_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M n800 -mtdblock $HDIMAGE.qemuflash -serial vc -m 130 -serial vc -serial vc -serial vc -usb -usbdevice net:0"
- fi
-fi
-
-if [ "$MACHINE" = "nokia800-maemo" ]; then
- QEMU=qemu-system-arm
- if [ "$TYPE" = "jffs2" ]; then
- HDIMAGE=`readlink -f $HDIMAGE`
- if [ ! -e "$HDIMAGE.qemuflash" ]; then
- if [ ! -e "$HDIMAGE.initfs" ]; then
- echo "Error, $HDIMAGE.initfs must exist!"
- return
- fi
- if [ ! -e "$HDIMAGE.config" ]; then
- echo "Error, $HDIMAGE.config must exist!"
- echo "To generate it, take an n800 and cat /dev/mtdblock1 > $HDIMAGE.config"
- return
- fi
- echo "'Flashing' config partition, please wait..."
- poky-nokia800-flashutil $HDIMAGE.config $HDIMAGE.qemuflash config
- echo "'Flashing' initfs, please wait..."
- poky-nokia800-flashutil $HDIMAGE.initfs $HDIMAGE.qemuflash initfs
- echo "'Flashing' rootfs, please wait..."
- poky-nokia800-flashutil $HDIMAGE $HDIMAGE.qemuflash
- fi
- KERNCMDLINE=""
- QEMU_NETWORK_CMD="-net nic,model=usb,vlan=0 $QEMU_TAP_CMD"
- QEMUOPTIONS="$QEMU_NETWORK_CMD -M n800 -mtdblock $HDIMAGE.qemuflash -serial vc -m 130 -serial vc -serial vc -serial vc -usb -usbdevice net:0 -show-cursor"
- fi
-fi
-
-if [ "x$QEMUOPTIONS" = "x" ]; then
- echo "Error: Unable to support this combination of options"
- return
-fi
-
-SDKDIR="/usr/local/poky/eabi-glibc"
-if [ "$MACHINE" = "qemuarm" -o "$MACHINE" = "spitz" -o "$MACHINE" = "borzoi" -o "$MACHINE" = "akita" -o "$MACHINE" = "nokia800" ]; then
- SDKPATH="$SDKDIR/arm/arm-poky-linux-gnueabi/bin:$SDKDIR/arm/bin"
-fi
-
-if [ "$MACHINE" = "qemux86" ]; then
- SDKPATH="$SDKDIR/i586/i586-poky-linux/bin:$SDKDIR/i586/bin"
-fi
-PATH=$CROSSPATH:$SDKPATH:$PATH
-
-QEMUBIN=`which $QEMU`
-
-if [ ! -x "$QEMUBIN" ]; then
- echo "Error: No QEMU binary '$QEMU' could be found."
- return
-fi
-
-function _quit() {
- if [ -n "$PIDFILE" ]; then
- #echo kill `cat $PIDFILE`
- kill `cat $PIDFILE`
- fi
- return
-}
-
-DISTCCD=`which distccd`
-PIDFILE=""
-
-trap _quit INT TERM QUIT
-
-if [ -x "$DISTCCD" ]; then
- echo "Starting distccd..."
- PIDFILE=`mktemp`
- $DISTCCD --allow 192.168.7.2 --daemon --pid-file $PIDFILE &
-else
- echo "Warning: distccd not present, no distcc support loaded."
-fi
-
-echo "Running $QEMU using sudo..."
-echo $QEMUBIN -kernel $ZIMAGE $QEMUOPTIONS $SERIALOPTS $* --append "$KERNCMDLINE"
-sudo $QEMUBIN -kernel $ZIMAGE $QEMUOPTIONS $SERIALOPTS $* --append "$KERNCMDLINE" || /bin/true
-
-trap - INT TERM QUIT
-return
diff --git a/scripts/poky-qemu.README b/scripts/poky-qemu.README
deleted file mode 100644
index ecceae978..000000000
--- a/scripts/poky-qemu.README
+++ /dev/null
@@ -1,90 +0,0 @@
-Poky images with QEMU
-=====================
-
-Poky can generate qemu bootable kernels and images with can be used
-on a desktop system. Both arm and x86 images can currently be booted.
-There are two scripts, runqemu and poky-qemu, one for use within poky,
-the other externally.
-
-QEMU outside Poky (poky-qemu)
-=============================
-
-The poky-qemu script is run as:
-
- MACHINE=<machine> poky-qemu <zimage> <filesystem>
-
-where:
-
- <zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
- <filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2)
- <machine> is "qemuarm" or "qemux86"
-
-The MACHINE=<machine> prefix is optional and without it the script will try
-to detect the machine name from the name of the <zimage> file.
-
-If <filesystem> isn't specified, nfs booting will be assumed.
-
-
-QEMU within Poky (runqemu)
-==========================
-
-The runqemu script is run as:
-
- runqemu <target> <type> <zimage> <filesystem>
-
-where:
-
- <target> is "qemuarm","qemux86","nokia800","spitz" or "akita"
- <type> is "ext2", "nfs", "ext3" or "jffs2". (not all machines support all options)
- <zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
- <filesystem> is the path to the image (e.g. filesystem-qemuarm.ext2)
-
-It will default to the qemuarm, ext2 and the last kernel and poky-image-sdk
-image built by poky.
-
-
-Notes
-=====
-
- - The scripts run qemu using sudo. Change perms on /dev/net/tun to
- run as non root
- - You can access the host computer at 192.168.7.1 within the image.
- - Your qemu system will be accessible as 192.16.7.2.
- - The default NFS mount points are /srv/nfs/qemux86 or /srv/nfs/qemuarm
- depending on the target type.
- - Images built for qemux86/qemuarm contain NFS server which export whole
- rootfs (/) in read/write mode.
- - You can set QEMU_MEMORY to control amount of available memory (default 64M).
- - You can set SERIAL_LOGFILE to have the serial output from the image logged
- to a file.
-
-
-NFS Image Notes
-===============
-
-As root;
-
-% apt-get install nfs-kernel-server
-
-% mkdir /srv/nfs/qemuarm
-
-Edit via /etc/exports :
-
-# /etc/exports: the access control list for filesystems which may be exported
-# to NFS clients. See exports(5).
-/srv/nfs/qemuarm 192.168.7.2(rw,no_root_squash)
-
-% /etc/init.d/nfs-kernel-server restart
-
-% modprobe tun
-
-untar build/tmp/deploy/images/<built image>.rootfs.tar.bz2 into /srv/nfs/qemuarm
-
-Finally, launch:
-
-% runqemu <target> nfs
-
-(Substitute qemux86 for qemuarm when using qemux86)
-
-
- Copyright (C) 2006-2008 OpenedHand Ltd.
diff --git a/scripts/qemuimage-testlib b/scripts/qemuimage-testlib
new file mode 100755
index 000000000..7bdaa4fb5
--- /dev/null
+++ b/scripts/qemuimage-testlib
@@ -0,0 +1,778 @@
+#!/bin/bash
+# Common functions for QEMU image testing
+# Expect should be installed for SSH Testing
+# To execute `runqemu`, NOPASSWD needs to be set in /etc/sudoers for user
+# For example, for user "builder", /etc/sudoers can be like following:
+# #########
+# #Members of the admin group may gain root privileges
+# %builder ALL=(ALL) NOPASSWD: ALL
+# #########
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+TYPE="ext3"
+
+# The folder to hold all scripts running on targets
+TOOLS="$COREBASE/scripts/qemuimage-tests/tools"
+
+# The folder to hold all projects for toolchain testing
+TOOLCHAIN_PROJECTS="$COREBASE/scripts/qemuimage-tests/toolchain_projects"
+
+# Test Directory on target for testing
+TARGET_TEST_DIR="/opt/test"
+
+# Global variable for process id
+PID=0
+
+# Global variable for target ip address
+TARGET_IPADDR=0
+
+# Global variable for test project version during toolchain test
+# Version of cvs is 1.12.13
+# Version of iptables is 1.4.11
+# Version of sudoku-savant is 1.3
+PROJECT_PV=0
+
+# Global variable for test project download URL during toolchain test
+# URL of cvs is http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2
+# URL of iptables is http://netfilter.org/projects/iptables/files/iptables-1.4.11.tar.bz2
+# URL of sudoku-savant is http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2
+PROJECT_DOWNLOAD_URL=0
+
+# SDK folder to hold toolchain tarball
+TOOLCHAIN_DIR="${DEPLOY_DIR}/sdk"
+
+# Toolchain test folder to hold extracted toolchain tarball
+TOOLCHAIN_TEST="/opt"
+
+# common function for information print
+Test_Error()
+{
+ echo -e "\tTest_Error: $*"
+}
+
+Test_Info()
+{
+ echo -e "\tTest_Info: $*"
+}
+
+# function to update target ip address
+# $1 is the process id of the process, which starts the qemu target
+# $2 is the ip address of the target
+Test_Update_IPSAVE()
+{
+ local pid=$1
+ local ip_addr=$2
+
+ if [ "$TEST_SERIALIZE" -eq 1 ]; then
+ echo "$pid $ip_addr" > $TARGET_IPSAVE
+ fi
+}
+
+# function to copy files from host into target
+# $1 is the ip address of target
+# $2 is the files, which need to be copied into target
+# $3 is the path on target, where files are copied into
+Test_SCP()
+{
+ local ip_addr=$1
+ local src=$2
+ local des=$3
+ local tmpfile=`mktemp`
+ local time_out=60
+ local ret=0
+
+ # We use expect to interactive with target by ssh
+ local exp_cmd=`cat << EOF
+eval spawn scp -o UserKnownHostsFile=$tmpfile "$src" root@$ip_addr:"$des"
+set timeout $time_out
+expect {
+ "*assword:" { send "\r"; exp_continue}
+ "*(yes/no)?" { send "yes\r"; exp_continue }
+ eof { exit [ lindex [wait] 3 ] }
+}
+EOF`
+
+ expect=`which expect`
+ if [ ! -x "$expect" ]; then
+ Test_Error "ERROR: Please install expect"
+ return 1
+ fi
+
+ expect -c "$exp_cmd"
+ ret=$?
+ rm -rf $tmpfile
+ return $ret
+}
+
+# function to run command in $ip_addr via ssh
+Test_SSH()
+{
+ local ip_addr=$1
+ shift
+ local command=$@
+ local tmpfile=`mktemp`
+ local time_out=60
+ local ret=0
+ local exp_cmd=`cat << EOF
+eval spawn ssh -o UserKnownHostsFile=$tmpfile root@$ip_addr "$command"
+set timeout $time_out
+expect {
+ "*assword:" { send "\r"; exp_continue}
+ "*(yes/no)?" { send "yes\r"; exp_continue }
+ eof { exit [ lindex [wait] 3 ] }
+}
+EOF`
+
+ expect=`which expect`
+ if [ ! -x "$expect" ]; then
+ Test_Error "ERROR: Please install expect"
+ return 1
+ fi
+
+ expect -c "$exp_cmd"
+ ret=$?
+ rm -rf $tmpfile
+ return $ret
+}
+
+# function to check if ssh is up in $ip_addr
+Test_SSH_UP()
+{
+ local ip_addr=$1
+ local timeout=$2
+ local interval=0
+
+ # If TEST_SERIALIZE is set, use existing running qemu for testing
+ if [ ${TEST_SERIALIZE} -eq 1 -a -e ${TARGET_IPSAVE} ]; then
+ timeout=50
+ fi
+
+ while [ ${interval} -lt ${timeout} ]
+ do
+ Test_SSH ${ip_addr} "hostname"
+ if [ $? -ne 0 ]; then
+ interval=`expr $interval + 10`
+ sleep 10
+ else
+ Test_Info "We can ssh on ${ip_addr} within ${interval} seconds"
+ return 0
+ fi
+
+ done
+
+ Test_Info "We can not ssh on ${ip_addr} in ${timeout} seconds"
+ return 1
+}
+
+# function to prepare target test environment
+# $1 is the ip address of target system
+# $2 is the files, which needs to be copied into target
+Test_Target_Pre()
+{
+ local ip_addr=$1
+ local testscript=$2
+
+ # Create a pre-defined folder for test scripts
+ Test_SSH $ip_addr "mkdir -p $TARGET_TEST_DIR"
+ if [ $? -eq 0 ]; then
+ # Copy test scripts into target
+ Test_SCP $ip_addr $testscript $TARGET_TEST_DIR && return 0
+ else
+ Test_Error "Fail to create $TARGET_TEST_DIR on target"
+ return 1
+ fi
+
+ return 1
+}
+
+# function to record test result in $TEST_RESULT/testresult.log
+Test_Print_Result()
+{
+ local PASS=0
+ local FAIL=0
+ local NORESULT=0
+ if [ $2 -eq 0 ]; then
+ PASS=1
+ elif [ $2 -eq 1 ]; then
+ FAIL=1
+ else
+ NORESULT=1
+ fi
+
+ # Format the output of the test result
+ echo -e "$1 $PASS $FAIL $NORESULT" | awk '{printf("\t"); for(i=1;i<=NF;i++) printf("%-15s",$i); printf("\n");}' >> $TEST_RESULT/testresult.log
+}
+
+# Test_Kill_Qemu: kill the qemu process and all of its child processes, given the parent pid
+# $1 is qemu process id, which needs to be killed
+Test_Kill_Qemu()
+{
+ local ret=0
+ local ppid=0
+ local i=0
+ local index=0
+ local total=0
+ declare local pid
+
+ # Check if $1 pid exists and is a qemu process
+ ps -fp $PID | grep -iq "qemu"
+
+ # Find all children pid of the pid $1
+ if [ $? -eq 0 ]; then
+
+ # Check if there is any child pid of the pid $PID
+ ppid=$PID
+ ps -f --ppid $ppid
+ ret=$?
+
+ while [ $ret -eq 0 ]
+ do
+ # If yes, get the child pid and check if the child pid has other child pid
+ # Continue the while loop until there is no child pid found
+ pid[$i]=`ps -f --ppid $ppid | awk '{if ($2 != "PID") print $2}'`
+ ppid=${pid[$i]}
+ i=$((i+1))
+ ps -f --ppid $ppid
+ ret=$?
+ done
+
+ # When TEST_SERIALIZE is set, qemu process will not be
+ # killed until all the cases are finished
+ if [ ${TEST_SERIALIZE} -eq 1 -a -e ${TEST_STATUS} ]; then
+ index=`sed -n 2p ${TEST_STATUS} | awk '{print $3}'`
+ total=`sed -n 2p ${TEST_STATUS} | awk '{print $4}'`
+ if [ ${index} != ${total} ]; then
+ Test_Info "Do not kill the qemu process and use it for later testing"
+ Test_Update_IPSAVE $PID $TARGET_IPADDR
+ else
+ # If it is the last case, let's kill it
+ while [ $i -ne 0 ]
+ do
+ i=$((i-1))
+ kill ${pid[$i]}
+ sleep 2
+ done
+
+ # Kill the parent id
+ kill $PID
+ fi
+
+ else
+ # Kill these children pids from the last one
+ while [ $i -ne 0 ]
+ do
+ i=$((i-1))
+ kill ${pid[$i]}
+ sleep 2
+ done
+
+ # Kill the parent id
+ kill $PID
+ fi
+ fi
+
+ return
+}
+
+# function to check if there is any qemu process
+Test_Check_Qemu_UP()
+{
+ local count=`ps -eo command | cut -d " " -f 1 | grep -c "\(^qemu\|.*/qemu\)"`
+ if [ ${count} -lt 1 ]; then
+ Test_Info "There is no Qemu process"
+ return 1
+ else
+ Test_Info "There is at least one Qemu process running"
+ return 0
+ fi
+}
+
+# function to check if network is up
+Test_Check_IP_UP()
+{
+ ping -c1 $1
+ if [ $? -ne 0 ]; then
+ Test_Info "IP $1 is not up"
+ return 1
+ else
+ Test_Info "IP $1 is up"
+ return 0
+ fi
+}
+
+# function to find kernel/rootfs image
+Test_Find_Image()
+{
+ where=""
+ kernel=""
+ arch=""
+ target=""
+ extension=""
+ rootfs=""
+
+ while getopts "l:k:a:t:" Option
+ do
+ case $Option in
+ l) where="$OPTARG"
+ ;;
+ k) kernel="$OPTARG"
+ extension="bin"
+ ;;
+ a) arch="$OPTARG"
+ ;;
+ t) target="$OPTARG"
+ extension="ext3"
+ ;;
+ *) echo "invalid option: -$Option" && return 1
+ ;;
+ esac
+ done
+
+ if [ ! -z $kernel ]; then
+ if [ -L ${where}/${kernel}-${arch}.${extension} ]; then
+ echo ${where}/${kernel}-${arch}.${extension}
+ return 0
+ else
+ for i in `dir ${where}`
+ do
+ # Exclude qemux86-64 when target is qemux86
+ echo $i | grep "${kernel}.*${arch}.*\.${extension}" | grep -qv "${kernel}.*${arch}-64.*\.${extension}"
+ if [ $? -eq 0 ]; then
+ echo ${where}/${i}
+ return 0
+ fi
+ done
+ return 1
+ fi
+ fi
+
+ if [ ! -z $target ]; then
+ if [ -L ${where}/${target}-${arch}.${extension} ]; then
+ rootfs=`readlink -f ${where}/${target}-${arch}.${extension}`
+ echo ${rootfs}
+ return 0
+ else
+ for i in `dir ${where}`
+ do
+ # Exclude qemux86-64 when target is qemux86
+ echo $i | grep "${target}-${arch}.*\.${extension}" | grep -qv "${target}-${arch}-64.*\.${extension}"
+ if [ $? -eq 0 ]; then
+ echo ${where}/${i}
+ return 0
+ fi
+ done
+ return 1
+ fi
+ fi
+ return 1
+}
+
+# function to parse IP address of target
+# $1 is the pid of qemu startup process
+Test_Fetch_Target_IP()
+{
+ local opid=$1
+ local ppid=0
+ local ip_addr=0
+ local i=0
+ declare local pid
+
+ # Check if $1 pid exists and contains ipaddr of target
+ ps -fp $opid | grep -oq "192\.168\.7\.[0-9]*::"
+
+ # Find all children pid of the pid $1
+ # and check if they contain ipaddr of target
+ if [ $? -ne 0 ]; then
+ # Check if there is any child pid of the pid $1
+ ppid=$opid
+ ps -f --ppid $ppid > /dev/zero
+ ret=$?
+
+ while [ $ret -eq 0 ]
+ do
+ # If yes, get the child pid and check if the child pid has other child pid
+ # Continue the while loop until there is no child pid found
+ pid[$i]=`ps -f --ppid $ppid | awk '{if ($2 != "PID") print $2}'`
+ ppid=${pid[$i]}
+ i=$((i+1))
+ ps -f --ppid $ppid > /dev/zero
+ ret=$?
+ done
+
+ # Check these children pids, if they have ipaddr included in command line
+ while [ $i -ne 0 ]
+ do
+ i=$((i-1))
+ ps -fp ${pid[$i]} | grep -oq "192\.168\.7\.[0-9]*::"
+ if [ $? -eq 0 ]; then
+ ip_addr=`ps -fp ${pid[$i]} | grep -o "192\.168\.7\.[0-9]*::" | awk -F":" '{print $1}'`
+ fi
+ sleep 1
+ done
+ else
+ ip_addr=`ps -fp $opid | grep -o "192\.168\.7\.[0-9]*::" | awk -F":" '{print $1}'`
+ fi
+
+ echo $ip_addr
+
+ return
+}
+
+# function to start qemu and check that qemu and its network are up
+Test_Create_Qemu()
+{
+ local timeout=$1
+ local ret=1
+ local up_time=0
+
+ which runqemu
+ if [ $? -eq 0 ]; then
+ RUNQEMU=`which runqemu`
+ else
+ Test_Error "Can not find runqemu in \$PATH, return fail"
+ return 1
+ fi
+
+ if [ "$QEMUARCH" = "qemux86" -o "$QEMUARCH" = "qemux86-64" ]; then
+ KERNEL=$(Test_Find_Image -l ${DEPLOY_DIR}/images -k bzImage -a ${QEMUARCH})
+ elif [ "$QEMUARCH" = "qemuarm" -o "$QEMUARCH" = "spitz" -o "$QEMUARCH" = "borzoi" -o "$QEMUARCH" = "akita" -o "$QEMUARCH" = "nokia800" ]; then
+ KERNEL=$(Test_Find_Image -l ${DEPLOY_DIR}/images -k zImage -a ${QEMUARCH})
+ elif [ "$QEMUARCH" = "qemumips" -o "$QEMUARCH" = "qemuppc" ]; then
+ KERNEL=$(Test_Find_Image -l ${DEPLOY_DIR}/images -k vmlinux -a ${QEMUARCH})
+ fi
+
+ # If there is no kernel image found, return failed directly
+ if [ $? -eq 1 ]; then
+ Test_Info "No kernel image file found under ${DEPLOY_DIR}/images for ${QEMUARCH}, pls. have a check"
+ return $ret
+ fi
+
+ ROOTFS_IMAGE=$(Test_Find_Image -l ${DEPLOY_DIR}/images -t ${QEMUTARGET} -a ${QEMUARCH})
+
+ # If there is no rootfs image found, return failed directly
+ if [ $? -eq 1 ]; then
+ Test_Info "No ${QEMUTARGET} rootfs image file found under ${DEPLOY_DIR}/images for ${QEMUARCH}, pls. have a check"
+ return $ret
+ fi
+
+ TEST_ROOTFS_IMAGE="${TEST_TMP}/${QEMUTARGET}-${QEMUARCH}-test.ext3"
+
+ CP=`which cp`
+
+ # When TEST_SERIALIZE is set, we use the existing image under tmp folder
+ if [ ${TEST_SERIALIZE} -eq 1 -a -e "$TARGET_IPSAVE" ]; then
+ # If TARGET_IPSAVE exists, check PID of the qemu process from it
+ PID=`awk '{print $1}' $TARGET_IPSAVE`
+ timeout=50
+ else
+ rm -rf $TEST_ROOTFS_IMAGE
+ echo "Copying rootfs $ROOTFS_IMAGE to $TEST_ROOTFS_IMAGE"
+ $CP $ROOTFS_IMAGE $TEST_ROOTFS_IMAGE
+ if [ $? -ne 0 ]; then
+ Test_Info "Image ${ROOTFS_IMAGE} copy to ${TEST_ROOTFS_IMAGE} failed, return fail"
+ return $ret
+ fi
+
+ export MACHINE=$QEMUARCH
+
+ # Create Qemu in localhost VNC Port 1
+ echo "Running xterm -display ${DISPLAY} -e 'OE_TMPDIR=${OE_TMPDIR} ${RUNQEMU} ${KERNEL} ${TEST_ROOTFS_IMAGE} && /bin/sleep 60' &"
+ xterm -display ${DISPLAY} -e "OE_TMPDIR=${OE_TMPDIR} ${RUNQEMU} ${KERNEL} ${TEST_ROOTFS_IMAGE} && /bin/sleep 60" &
+
+ # Get the pid of the xterm processor, which will be used in Test_Kill_Qemu
+ PID=$!
+ fi
+
+ while [ ${up_time} -lt 10 ]
+ do
+ Test_Check_Qemu_UP
+ if [ $? -ne 0 ]; then
+ Test_Info "Wait for qemu up..."
+ up_time=`expr $up_time + 5`
+ sleep 5
+ else
+ Test_Info "Begin to check if qemu network is up"
+ break
+ fi
+ done
+
+ # Parse IP address of target from the qemu command line
+ if [ ${up_time} -lt ${timeout} ]; then
+ sleep 5
+ TARGET_IPADDR=`Test_Fetch_Target_IP $PID`
+ # If IP address is 0, means there is no qemu process found
+ if [ ${TARGET_IPADDR} == "0" ]; then
+ Test_Info "There is no qemu process or qemu ip address found, return failed"
+ return $ret
+ fi
+ fi
+
+ while [ ${up_time} -lt ${timeout} ]
+ do
+ Test_Check_IP_UP ${TARGET_IPADDR}
+ if [ $? -eq 0 ]; then
+ Test_Info "Qemu Network is up, ping with ${TARGET_IPADDR} is OK within ${up_time} seconds"
+ ret=0
+ break
+ else
+ Test_Info "Wait for Qemu Network up"
+ up_time=`expr $up_time + 5`
+ sleep 5
+ fi
+ done
+
+ if [ $ret -eq 0 ]; then
+ Test_Info "Qemu and its network is up"
+ return $ret
+ else
+ Test_Info "Qemu or its network is not up in ${timeout} seconds"
+ Test_Update_IPSAVE $PID $TARGET_IPADDR
+ return $ret
+ fi
+}
+
+# Function to prepare test project for toolchain test
+# $1 is the folder holding test project file
+# $2 is the test project name
+Test_Project_Prepare()
+{
+ local toolchain_dir=$1
+ local ret=1
+
+ if [ ! -d ${toolchain_dir} ]; then
+ mkdir -p ${toolchain_dir}
+ ret=$?
+
+ if [ $ret -ne 0 ]; then
+ Test_Info "Create ${toolchain_dir} fail, return"
+ return $ret
+ fi
+ fi
+
+ ret=0
+ # Download test project tarball if it does not exist
+ if [ ! -f ${toolchain_dir}/${2}-${PROJECT_PV}.${suffix} ]; then
+ wget -c -t 5 $PROJECT_DOWNLOAD_URL -O ${toolchain_dir}/${2}-${PROJECT_PV}.${suffix}
+ ret=$?
+ fi
+
+ # Extract the test project into ${TEST_TMP}
+ if [ $ret -eq 0 ]; then
+ tar jxf ${toolchain_dir}/${2}-${PROJECT_PV}.${suffix} -C ${TEST_TMP}
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ Test_Info "Extract ${2}-${PROJECT_PV}.${suffix} into ${TEST_TMP} successfully"
+ return $ret
+ else
+ Test_Info "Fail to extract ${2}-${PROJECT_PV}.${suffix} into ${TEST_TMP}"
+ return $ret
+ fi
+ else
+ Test_Info "Fail to download ${2}-${PROJECT_PV}.${suffix} from $PROJECT_DOWNLOAD_URL"
+ rm -rf ${toolchain_dir}/${2}-${PROJECT_PV}.${suffix}
+ return $ret
+ fi
+}
+
+# Function to prepare toolchain environment
+# $1 is toolchain directory to hold toolchain tarball
+# $2 is prefix name for toolchain tarball
+Test_Toolchain_Prepare()
+{
+ local toolchain_dir=$1
+ local sdk_name=$2
+ local ret=1
+
+ if [ ! -d ${toolchain_dir} ]; then
+ Test_Info "No directory ${toolchain_dir}, which holds toolchain tarballs"
+ return 1
+ fi
+
+ # Check if there is any toolchain tarball under $toolchain_dir with prefix $sdk_name
+ for i in `dir ${toolchain_dir}`
+ do
+ echo $i | grep "${sdk_name}-toolchain-gmae"
+ if [ $? -eq 0 ]; then
+ rm -rf ${TEST_TMP}/opt
+ tar jxf ${toolchain_dir}/${i} -C ${TEST_TMP}
+ ret=$?
+ break
+ fi
+ done
+
+ if [ $ret -eq 0 ]; then
+ Test_Info "Check if /opt is accessible for non-root user"
+
+ # Check if the non-root test user has write access of $TOOLCHAIN_TEST
+ if [ -d ${TOOLCHAIN_TEST} ]; then
+ touch ${TOOLCHAIN_TEST}
+ if [ $? -ne 0 ]; then
+ Test_Info "Has no right to modify folder $TOOLCHAIN_TEST, pls. chown it to test user"
+ return 2
+ fi
+ else
+ mkdir -p ${TOOLCHAIN_TEST}
+ if [ $? -ne 0 ]; then
+ Test_Info "Has no right to create folder $TOOLCHAIN_TEST, pls. create it and chown it to test user"
+ return 2
+ fi
+ fi
+
+ # If there is a toolchain folder under $TOOLCHAIN_TEST, let's remove it
+ if [ -d ${TOOLCHAIN_TEST}/poky ]; then
+ rm -rf ${TOOLCHAIN_TEST}/poky
+ fi
+
+ # Copy toolchain into $TOOLCHAIN_TEST
+ cp -r ${TEST_TMP}/opt/poky ${TOOLCHAIN_TEST}
+ ret=$?
+
+ if [ $ret -eq 0 ]; then
+ Test_Info "Successfully copy toolchain into $TOOLCHAIN_TEST"
+ return $ret
+ else
+ Test_Info "Meet error when copy toolchain into $TOOLCHAIN_TEST"
+ return $ret
+ fi
+ else
+ Test_Info "No tarball named ${sdk_name}-toolchain-gmae under ${toolchain_dir}"
+ return $ret
+ fi
+}
+
+# Function to execute command and exit if run out of time
+# $1 is timeout value
+# $2 is the command to be executed
+Test_Time_Out()
+{
+ local timeout=$1
+ shift
+ local command=$*
+ local date=0
+ local tmp=`mktemp`
+ local ret=1
+ local pid=0
+ local ppid=0
+ local i=0
+ declare local pid_l
+
+ # Run command in background
+ ($command; echo $? > $tmp) &
+ pid=$!
+ while ps -e -o pid | grep -qw $pid; do
+ if [ $date -ge $timeout ]; then
+ Test_Info "$timeout Timeout when running command $command"
+ rm -rf $tmp
+
+ # Find all child processes of pid and kill them
+ ppid=$pid
+ ps -f --ppid $ppid
+ ret=$?
+
+ while [ $ret -eq 0 ]
+ do
+ # If yes, get the child pid and check if the child pid has other child pid
+ # Continue the while loop until there is no child pid found
+ pid_l[$i]=`ps -f --ppid $ppid | awk '{if ($2 != "PID") print $2}'`
+ ppid=${pid_l[$i]}
+ i=$((i+1))
+ ps -f --ppid $ppid
+ ret=$?
+ done
+
+ # Kill these children pids from the last one
+ while [ $i -ne 0 ]
+ do
+ i=$((i-1))
+ kill ${pid_l[$i]}
+ sleep 2
+ done
+
+ # Kill the parent id
+ kill $pid
+ return 1
+ fi
+ sleep 5
+ date=`expr $date + 5`
+ done
+ ret=`cat $tmp`
+ rm -rf $tmp
+ return $ret
+}
+
+# Function to test toolchain
+# $1 is test project name
+# $2 is the timeout value
+Test_Toolchain()
+{
+ local test_project=$1
+ local timeout=$2
+ local ret=1
+ local suffix="tar.bz2"
+ local env_setup=""
+ local pro_install="${TEST_TMP}/pro_install"
+
+ # Set value for PROJECT_PV and PROJECT_DOWNLOAD_URL accordingly
+ if [ $test_project == "cvs" ]; then
+ PROJECT_PV=1.12.13
+ PROJECT_DOWNLOAD_URL="http://ftp.gnu.org/non-gnu/cvs/source/feature/1.12.13/cvs-1.12.13.tar.bz2"
+ elif [ $test_project == "iptables" ]; then
+ PROJECT_PV=1.4.11
+ PROJECT_DOWNLOAD_URL="http://netfilter.org/projects/iptables/files/iptables-1.4.11.tar.bz2"
+ elif [ $test_project == "sudoku-savant" ]; then
+ PROJECT_PV=1.3
+ PROJECT_DOWNLOAD_URL="http://downloads.sourceforge.net/project/sudoku-savant/sudoku-savant/sudoku-savant-1.3/sudoku-savant-1.3.tar.bz2"
+ else
+ Test_Info "Unknown test project name $test_project"
+ return 1
+ fi
+
+ # Download test project and extract it
+ Test_Project_Prepare $TOOLCHAIN_PROJECTS $test_project
+ if [ $? -ne 0 ]; then
+ Test_Info "Prepare test project file failed"
+ return 1
+ fi
+
+ # Extract toolchain tarball into ${TEST_TMP}
+ Test_Toolchain_Prepare $TOOLCHAIN_DIR $SDK_NAME
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ Test_Info "Prepare toolchain test environment failed"
+ return $ret
+ fi
+
+ if [ ! -d ${pro_install} ]; then
+ mkdir -p ${pro_install}
+ fi
+
+ # Begin to build test project in toolchain environment
+ env_setup=`find ${TOOLCHAIN_TEST}/poky -name "environment-setup*"`
+
+ source $env_setup
+
+ if [ $test_project == "cvs" -o $test_project == "iptables" ]; then
+ cd ${TEST_TMP}/${test_project}-${PROJECT_PV}
+ Test_Time_Out $timeout ./configure ${CONFIGURE_FLAGS} || { Test_Info "configure failed with $test_project"; return 1; }
+ Test_Time_Out $timeout make -j4 || { Test_Info "make failed with $test_project"; return 1; }
+ Test_Time_Out $timeout make install DESTDIR=${pro_install} || { Test_Info "make failed with $test_project"; return 1; }
+ cd -
+ ret=0
+ elif [ $test_project == "sudoku-savant" ]; then
+ cd ${TEST_TMP}/${test_project}-${PROJECT_PV}
+ Test_Time_Out $timeout ./configure ${CONFIGURE_FLAGS} || { Test_Info "configure failed with $test_project"; return 1; }
+ Test_Time_Out $timeout make -j4 || { Test_Info "make failed with $test_project"; return 1; }
+ cd -
+ ret=0
+ else
+ Test_Info "Unknown test project $test_project"
+ ret=1
+ fi
+
+ return $ret
+}
diff --git a/scripts/qemuimage-tests/sanity/boot b/scripts/qemuimage-tests/sanity/boot
new file mode 100755
index 000000000..cf8aafbc5
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/boot
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Boot Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if qemu and qemu network is up.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=120
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+if [ $? -eq 0 ]; then
+ Test_Info "Boot Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "Boot" 0
+ exit 0
+else
+ Test_Info "Boot Test FAIL"
+ Test_Kill_Qemu
+ Test_Print_Result "Boot" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/compiler b/scripts/qemuimage-tests/sanity/compiler
new file mode 100755
index 000000000..ef0700732
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/compiler
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Compiler Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if gcc/g++/make command can work in target.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if gcc/g++/make can work in target
+if [ $RET -eq 0 -a -f $TOOLS/compiler_test.sh ]; then
+ # Copy compiler_test.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/compiler_test.sh
+ if [ $? -eq 0 ]; then
+ # Run compiler_test.sh to check if gcc/g++/make can work in target
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/compiler_test.sh"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "Compiler Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "compiler" 0
+ exit 0
+else
+ Test_Info "Compiler FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "compiler" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/connman b/scripts/qemuimage-tests/sanity/connman
new file mode 100755
index 000000000..0c62902f8
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/connman
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Connman Check Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if connman can work in target.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if connman can work in target
+if [ $RET -eq 0 -a -f $TOOLS/connman_test.sh ]; then
+ # Copy connman_test.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/connman_test.sh
+ if [ $? -eq 0 ]; then
+ # Run connman_test.sh to check if connman can work in target
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/connman_test.sh"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "Connman Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "connman" 0
+ exit 0
+else
+ Test_Info "Connman Test FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "connman" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/dmesg b/scripts/qemuimage-tests/sanity/dmesg
new file mode 100755
index 000000000..aed29e05e
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/dmesg
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Dmesg Check Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if there is any error log in dmesg.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if there is any error log in dmesg
+if [ $RET -eq 0 -a -f $TOOLS/dmesg.sh ]; then
+ # Copy dmesg.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/dmesg.sh
+ if [ $? -eq 0 ]; then
+ # Run dmesg.sh to check if there is any error message with command dmesg
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/dmesg.sh"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "Dmesg Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "dmesg" 0
+ exit 0
+else
+ Test_Info "Dmesg Test FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "dmesg" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/rpm_query b/scripts/qemuimage-tests/sanity/rpm_query
new file mode 100755
index 000000000..dd652bd99
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/rpm_query
@@ -0,0 +1,52 @@
+#!/bin/bash
+# RPM Check Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if rpm command can work in target.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if rpm query can work in target
+if [ $RET -eq 0 -a -f $TOOLS/rpm_test.sh ]; then
+ # Copy rpm_test.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/rpm_test.sh
+ if [ $? -eq 0 ]; then
+ # Run rpm_test.sh to check if rpm query can work in target
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/rpm_test.sh -qa"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "rpm query Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "rpm_query" 0
+ exit 0
+else
+ Test_Info "rpm query FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "rpm_query" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/scp b/scripts/qemuimage-tests/sanity/scp
new file mode 100755
index 000000000..b0b693d0c
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/scp
@@ -0,0 +1,71 @@
+#!/bin/bash
+# SCP Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if file can be copied into target with scp command.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+SPID=0
+i=0
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if file can be copied from host into target
+# For qemu target, the file is 5M
+if [ $RET -eq 0 ]; then
+ echo $QEMUARCH | grep -q "qemu"
+
+ if [ $? -eq 0 ]; then
+ dd if=/dev/zero of=${TEST_TMP}/scp_test_file bs=512k count=10
+ Test_SCP ${TARGET_IPADDR} ${TEST_TMP}/scp_test_file /home/root &
+ SPID=$!
+ fi
+
+ # Check if scp finished or not
+ while [ $i -lt $TIMEOUT ]
+ do
+ ps -fp $SPID > /dev/null
+ if [ $? -ne 0 ]; then
+ RET=0
+ break
+ fi
+ i=$((i+5))
+ sleep 5
+ done
+
+ # Kill scp process if scp is not finished in time
+ if [ $i -ge $TIMEOUT ]; then
+ RET=1
+ kill $SPID
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "SCP Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "SCP" 0
+ exit 0
+else
+ Test_Info "SCP Test FAIL"
+ Test_Kill_Qemu
+ Test_Print_Result "SCP" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/shutdown b/scripts/qemuimage-tests/sanity/shutdown
new file mode 100755
index 000000000..d55c85941
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/shutdown
@@ -0,0 +1,76 @@
+#!/bin/bash
+# Shutdown Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if target can shutdown
+# For qemux86/x86-64, we use command "poweroff" for target shutdown
+# For non-x86 targets, we use command "reboot" for target shutdown
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+
+RET=1
+i=0
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if target can shutdown
+if [ $RET -eq 0 ]; then
+ echo $QEMUARCH | grep -q "qemux86"
+
+ # For qemux86/x86-64, command "poweroff" is used
+ # For non x86 qemu targets, command "reboot" is used because of BUG #100
+ if [ $? -eq 0 ]; then
+ Test_SSH ${TARGET_IPADDR} "/sbin/poweroff"
+ else
+ Test_SSH ${TARGET_IPADDR} "/sbin/reboot"
+ fi
+
+ # If qemu start up process ends up, it means shutdown completes
+ while [ $i -lt $TIMEOUT ]
+ do
+ ps -fp $PID > /dev/null
+ if [ $? -ne 0 ]; then
+ RET=0
+ break
+ fi
+ i=$((i+5))
+ sleep 5
+ done
+
+ if [ $i -ge $TIMEOUT ]; then
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "Shutdown Test PASS"
+ Test_Print_Result "shutdown" 0
+
+ # Remove TARGET_IPSAVE since no existing qemu running now
+ if [ -e ${TARGET_IPSAVE} ]; then
+ rm -rf ${TARGET_IPSAVE}
+ fi
+ exit 0
+else
+ Test_Info "Shutdown Test FAIL"
+ Test_Kill_Qemu
+ Test_Print_Result "shutdown" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/ssh b/scripts/qemuimage-tests/sanity/ssh
new file mode 100755
index 000000000..181296b0b
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/ssh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# SSH Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if ssh service in qemu is up.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "SSH Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "SSH" 0
+ exit 0
+else
+ Test_Info "SSH Test FAIL"
+ Test_Kill_Qemu
+ Test_Print_Result "SSH" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/zypper_help b/scripts/qemuimage-tests/sanity/zypper_help
new file mode 100755
index 000000000..94f82c434
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/zypper_help
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Zypper Check Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if zypper command can work in target.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if zypper help can work in target
+if [ $RET -eq 0 -a -f $TOOLS/zypper_test.sh ]; then
+ # Copy zypper_test.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/zypper_test.sh
+ if [ $? -eq 0 ]; then
+ # Run zypper_test.sh to check if zypper help can work in target
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/zypper_test.sh help"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "zypper help Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "zypper_help" 0
+ exit 0
+else
+ Test_Info "zypper help FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "zypper_help" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/sanity/zypper_search b/scripts/qemuimage-tests/sanity/zypper_search
new file mode 100755
index 000000000..e687eadfb
--- /dev/null
+++ b/scripts/qemuimage-tests/sanity/zypper_search
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Zypper Check Test Case for Sanity Test
+# The case boot up the Qemu target with `runqemu qemuxxx`.
+# Then check if zypper command can work in target.
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=400
+RET=1
+
+# Start qemu and check its network
+Test_Create_Qemu ${TIMEOUT}
+
+# If qemu network is up, check ssh service in qemu
+if [ $? -eq 0 ]; then
+ Test_Info "Begin to Test SSH Service in Qemu"
+ Test_SSH_UP ${TARGET_IPADDR} ${TIMEOUT}
+ RET=$?
+else
+ RET=1
+fi
+
+# Check if zypper search can work in target
+if [ $RET -eq 0 -a -f $TOOLS/zypper_test.sh ]; then
+ # Copy zypper_test.sh into target
+ Test_Target_Pre ${TARGET_IPADDR} $TOOLS/zypper_test.sh
+ if [ $? -eq 0 ]; then
+ # Run zypper_test.sh to check if zypper search can work in target
+ Test_SSH ${TARGET_IPADDR} "sh $TARGET_TEST_DIR/zypper_test.sh search avahi"
+ RET=$?
+ else
+ RET=1
+ fi
+fi
+
+if [ ${RET} -eq 0 ]; then
+ Test_Info "zypper search package avahi Test PASS"
+ Test_Kill_Qemu
+ Test_Print_Result "zypper_search" 0
+ exit 0
+else
+ Test_Info "zypper search package avahi FAIL, Pls. check above error log"
+ Test_Kill_Qemu
+ Test_Print_Result "zypper_search" 1
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/scenario/qemuarm/core-image-lsb b/scripts/qemuimage-tests/scenario/qemuarm/core-image-lsb
new file mode 100644
index 000000000..4fa606876
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuarm/core-image-lsb
@@ -0,0 +1,7 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuarm/core-image-minimal b/scripts/qemuimage-tests/scenario/qemuarm/core-image-minimal
new file mode 100644
index 000000000..0fcc7bba8
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuarm/core-image-minimal
@@ -0,0 +1 @@
+sanity boot
diff --git a/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato b/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato
new file mode 100644
index 000000000..7a6353e1a
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato
@@ -0,0 +1,8 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato-sdk b/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato-sdk
new file mode 100644
index 000000000..42b8e1902
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuarm/core-image-sato-sdk
@@ -0,0 +1,9 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity compiler
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuarm/meta-toolchain-gmae b/scripts/qemuimage-tests/scenario/qemuarm/meta-toolchain-gmae
new file mode 100644
index 000000000..199176efc
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuarm/meta-toolchain-gmae
@@ -0,0 +1,3 @@
+toolchain cvs
+toolchain iptables
+toolchain sudoku-savant
diff --git a/scripts/qemuimage-tests/scenario/qemumips/core-image-lsb b/scripts/qemuimage-tests/scenario/qemumips/core-image-lsb
new file mode 100644
index 000000000..4fa606876
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemumips/core-image-lsb
@@ -0,0 +1,7 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemumips/core-image-minimal b/scripts/qemuimage-tests/scenario/qemumips/core-image-minimal
new file mode 100644
index 000000000..0fcc7bba8
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemumips/core-image-minimal
@@ -0,0 +1 @@
+sanity boot
diff --git a/scripts/qemuimage-tests/scenario/qemumips/core-image-sato b/scripts/qemuimage-tests/scenario/qemumips/core-image-sato
new file mode 100644
index 000000000..7a6353e1a
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemumips/core-image-sato
@@ -0,0 +1,8 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemumips/core-image-sato-sdk b/scripts/qemuimage-tests/scenario/qemumips/core-image-sato-sdk
new file mode 100644
index 000000000..42b8e1902
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemumips/core-image-sato-sdk
@@ -0,0 +1,9 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity compiler
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemumips/meta-toolchain-gmae b/scripts/qemuimage-tests/scenario/qemumips/meta-toolchain-gmae
new file mode 100644
index 000000000..199176efc
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemumips/meta-toolchain-gmae
@@ -0,0 +1,3 @@
+toolchain cvs
+toolchain iptables
+toolchain sudoku-savant
diff --git a/scripts/qemuimage-tests/scenario/qemuppc/core-image-lsb b/scripts/qemuimage-tests/scenario/qemuppc/core-image-lsb
new file mode 100644
index 000000000..4fa606876
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuppc/core-image-lsb
@@ -0,0 +1,7 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuppc/core-image-minimal b/scripts/qemuimage-tests/scenario/qemuppc/core-image-minimal
new file mode 100644
index 000000000..0fcc7bba8
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuppc/core-image-minimal
@@ -0,0 +1 @@
+sanity boot
diff --git a/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato b/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato
new file mode 100644
index 000000000..7a6353e1a
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato
@@ -0,0 +1,8 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato-sdk b/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato-sdk
new file mode 100644
index 000000000..42b8e1902
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuppc/core-image-sato-sdk
@@ -0,0 +1,9 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity compiler
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemuppc/meta-toolchain-gmae b/scripts/qemuimage-tests/scenario/qemuppc/meta-toolchain-gmae
new file mode 100644
index 000000000..199176efc
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemuppc/meta-toolchain-gmae
@@ -0,0 +1,3 @@
+toolchain cvs
+toolchain iptables
+toolchain sudoku-savant
diff --git a/scripts/qemuimage-tests/scenario/qemux86-64/core-image-lsb b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-lsb
new file mode 100644
index 000000000..4fa606876
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-lsb
@@ -0,0 +1,7 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86-64/core-image-minimal b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-minimal
new file mode 100644
index 000000000..0fcc7bba8
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-minimal
@@ -0,0 +1 @@
+sanity boot
diff --git a/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato
new file mode 100644
index 000000000..7a6353e1a
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato
@@ -0,0 +1,8 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato-sdk b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato-sdk
new file mode 100644
index 000000000..42b8e1902
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86-64/core-image-sato-sdk
@@ -0,0 +1,9 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity compiler
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86-64/meta-toolchain-gmae b/scripts/qemuimage-tests/scenario/qemux86-64/meta-toolchain-gmae
new file mode 100644
index 000000000..199176efc
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86-64/meta-toolchain-gmae
@@ -0,0 +1,3 @@
+toolchain cvs
+toolchain iptables
+toolchain sudoku-savant
diff --git a/scripts/qemuimage-tests/scenario/qemux86/core-image-lsb b/scripts/qemuimage-tests/scenario/qemux86/core-image-lsb
new file mode 100644
index 000000000..4fa606876
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86/core-image-lsb
@@ -0,0 +1,7 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86/core-image-minimal b/scripts/qemuimage-tests/scenario/qemux86/core-image-minimal
new file mode 100644
index 000000000..0fcc7bba8
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86/core-image-minimal
@@ -0,0 +1 @@
+sanity boot
diff --git a/scripts/qemuimage-tests/scenario/qemux86/core-image-sato b/scripts/qemuimage-tests/scenario/qemux86/core-image-sato
new file mode 100644
index 000000000..7a6353e1a
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86/core-image-sato
@@ -0,0 +1,8 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86/core-image-sato-sdk b/scripts/qemuimage-tests/scenario/qemux86/core-image-sato-sdk
new file mode 100644
index 000000000..42b8e1902
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86/core-image-sato-sdk
@@ -0,0 +1,9 @@
+sanity ssh
+sanity scp
+sanity dmesg
+sanity zypper_help
+sanity zypper_search
+sanity rpm_query
+sanity compiler
+sanity connman
+sanity shutdown
diff --git a/scripts/qemuimage-tests/scenario/qemux86/meta-toolchain-gmae b/scripts/qemuimage-tests/scenario/qemux86/meta-toolchain-gmae
new file mode 100644
index 000000000..199176efc
--- /dev/null
+++ b/scripts/qemuimage-tests/scenario/qemux86/meta-toolchain-gmae
@@ -0,0 +1,3 @@
+toolchain cvs
+toolchain iptables
+toolchain sudoku-savant
diff --git a/scripts/qemuimage-tests/toolchain/cvs b/scripts/qemuimage-tests/toolchain/cvs
new file mode 100755
index 000000000..871d99110
--- /dev/null
+++ b/scripts/qemuimage-tests/toolchain/cvs
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# CVS compile Test for toolchain test
+# The case extracts the toolchain tarball into a temp folder
+# Then compiles CVS with the toolchain environment
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=120
+
+# Extract and test toolchain tarball
+Test_Toolchain cvs ${TIMEOUT}
+
+if [ $? -eq 0 ]; then
+ Test_Info "CVS Test PASS"
+ Test_Print_Result "CVS" 0
+ exit 0
+elif [ $? -eq 1 ]; then
+ Test_Info "CVS Test FAIL"
+ Test_Print_Result "CVS" 1
+ exit 1
+else
+ Test_Info "Skip CVS Test due to some configuration problem"
+ Test_Print_Result "CVS" 2
+ exit 2
+fi
diff --git a/scripts/qemuimage-tests/toolchain/iptables b/scripts/qemuimage-tests/toolchain/iptables
new file mode 100755
index 000000000..af89bbe7b
--- /dev/null
+++ b/scripts/qemuimage-tests/toolchain/iptables
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# iptables compile Test for toolchain test
+# The case extracts the toolchain tarball into a temp folder
+# Then compiles iptables with the toolchain environment
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=120
+
+# Extract and test toolchain tarball
+Test_Toolchain iptables ${TIMEOUT}
+
+if [ $? -eq 0 ]; then
+ Test_Info "iptables Test PASS"
+ Test_Print_Result "iptables" 0
+ exit 0
+elif [ $? -eq 1 ]; then
+ Test_Info "iptables Test FAIL"
+ Test_Print_Result "iptables" 1
+ exit 1
+else
+ Test_Info "Skip iptables Test due to some configuration problem"
+ Test_Print_Result "iptables" 2
+ exit 2
+fi
diff --git a/scripts/qemuimage-tests/toolchain/sudoku-savant b/scripts/qemuimage-tests/toolchain/sudoku-savant
new file mode 100755
index 000000000..3d149dea2
--- /dev/null
+++ b/scripts/qemuimage-tests/toolchain/sudoku-savant
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# sudoku-savant compile Test for toolchain test
+# The case extracts the toolchain tarball into a temp folder
+# Then compiles sudoku-savant with the toolchain environment
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+. $COREBASE/scripts/qemuimage-testlib
+
+TIMEOUT=240
+
+# Extract and test toolchain tarball
+Test_Toolchain sudoku-savant ${TIMEOUT}
+
+if [ $? -eq 0 ]; then
+ Test_Info "sudoku-savant Test PASS"
+ Test_Print_Result "sudoku-savant" 0
+ exit 0
+elif [ $? -eq 1 ]; then
+ Test_Info "sudoku-savant Test FAIL"
+ Test_Print_Result "sudoku-savant" 1
+ exit 1
+else
+ Test_Info "Skip sudoku-savant Test due to some configuration problem"
+ Test_Print_Result "sudoku-savant" 2
+ exit 2
+fi
diff --git a/scripts/qemuimage-tests/tools/compiler_test.sh b/scripts/qemuimage-tests/tools/compiler_test.sh
new file mode 100644
index 000000000..9c30d6d78
--- /dev/null
+++ b/scripts/qemuimage-tests/tools/compiler_test.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+# compiler test script running in target
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+# Prepare test folder for compiler test
+COMPILE_FOLDER="/opt/test/compile_test"
+TEST_FILE="$COMPILE_FOLDER/compile_test.c"
+EXECUTE_FILE="$COMPILE_FOLDER/compile_test"
+TEST_MAKEFILE="$COMPILE_FOLDER/makefile"
+TEST_LIST="gcc g++ make"
+
+if [ ! -d $COMPILE_FOLDER ]; then
+ mkdir -p $COMPILE_FOLDER
+fi
+
+Target_Info()
+{
+ echo -e "\tTARGET: $*"
+}
+
+Target_Err()
+{
+ echo -e "\tTARGET: ##### Error Log #####"
+ $@
+ echo -e "\tTARGET: ##### End #####"
+}
+
+# Function to generate a c test file for compiler testing
+Gen_File()
+{
+ temp=`mktemp`
+
+ # Generate c/c++ test file for compiler testing
+ echo "#include <stdio.h>" >> $temp
+ echo "#include <math.h>" >> $temp
+ echo "" >> $temp
+ echo "double" >> $temp
+ echo "convert(long long l)" >> $temp
+ echo "{" >> $temp
+ echo " return (double)l; // or double(l)" >> $temp
+ echo "}" >> $temp
+ echo "" >> $temp
+ echo "int" >> $temp
+ echo "main(int argc, char * argv[])" >> $temp
+ echo "{" >> $temp
+ echo " long long l = 10;" >> $temp
+ echo " double f;" >> $temp
+ echo "" >> $temp
+ echo " f = convert(l);" >> $temp
+ echo " printf(\"convert: %lld => %f\n\", l, f);" >> $temp
+ echo "" >> $temp
+ echo " f = 1234.67;" >> $temp
+ echo " printf(\"floorf(%f) = %f\n\", f, floorf(f));" >> $temp
+ echo " return 0;" >> $temp
+ echo "}" >> $temp
+ echo $temp
+}
+
+# Function to generate a makefile for compiler testing
+Gen_Makefile()
+{
+ temp=`mktemp`
+ basename=`basename $EXECUTE_FILE`
+
+ echo -e "$basename: $basename.o" >> $temp
+ echo -e "\tgcc -o $basename $basename.o -lm" >> $temp
+ echo -e "$basename.o: $basename.c" >> $temp
+ echo -e "\tgcc -c $basename.c" >> $temp
+
+ echo $temp
+}
+
+# Generate a c test file for compiler testing
+test_file=`Gen_File`
+
+MOVE=`which mv`
+$MOVE $test_file $TEST_FILE
+
+# Begin compiler test in target
+for cmd in $TEST_LIST
+do
+ which $cmd
+ if [ $? -ne 0 ]; then
+ Target_Info "No $cmd command found"
+ exit 1
+ fi
+
+ if [ "$cmd" == "make" ]; then
+ rm -rf $EXECUTE_FILE
+
+ # For makefile test, we need to generate a makefile and run with a c file
+ makefile=`Gen_Makefile`
+ $MOVE $makefile $TEST_MAKEFILE
+
+ cd `dirname $TEST_MAKEFILE`
+ make
+
+ if [ $? -ne 0 ]; then
+ Target_Info "$cmd running with error, Pls. check error in following"
+ Target_Err make
+ exit 1
+ fi
+ else
+ rm -rf $EXECUTE_FILE
+
+ # For gcc/g++, we compile a c test file and check the output
+ $cmd $TEST_FILE -o $EXECUTE_FILE -lm
+
+ if [ $? -ne 0 ]; then
+ Target_Info "$cmd running with error, Pls. check error in following"
+ Target_Err $cmd $TEST_FILE -o $EXECUTE_FILE -lm
+ exit 1
+ fi
+ fi
+
+ # Check if the binary file generated by $cmd can work without error
+ if [ -f $EXECUTE_FILE ]; then
+ $EXECUTE_FILE
+ if [ $? -ne 0 ]; then
+ Target_Info "$EXECUTE_FILE running with error, Pls. check error in following"
+ Target_Err $EXECUTE_FILE
+ exit 1
+ else
+ Target_Info "$cmd can work without problem in target"
+ fi
+ else
+ Target_Info "No executable file $EXECUTE_FILE found, Pls. check the error log"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/scripts/qemuimage-tests/tools/connman_test.sh b/scripts/qemuimage-tests/tools/connman_test.sh
new file mode 100644
index 000000000..8ed8b8b4a
--- /dev/null
+++ b/scripts/qemuimage-tests/tools/connman_test.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# connman test script running in target
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+Target_Info()
+{
+ echo -e "\tTARGET: $*"
+}
+
+Target_Err()
+{
+ echo -e "\tTARGET: connman has issue when running, Pls. check the error log"
+ echo -e "\tTARGET: ##### Error Log #####"
+ $1
+ echo -e "\tTARGET: ##### End #####"
+}
+
+# Check if ps comes from Procps or busybox first
+ls -l `which ps` | grep -q "busybox"
+RET=$?
+
+if [ $RET -eq 0 ]; then
+ PS="ps"
+else
+ PS="ps -ef"
+fi
+
+# Check if connmand is in target
+if [ ! -f /usr/sbin/connmand ]; then
+ Target_Info "No connmand command found"
+ exit 1
+fi
+
+# Check if connmand is running in background
+if [ $RET -eq 0 ]; then
+ count=`ps | awk '{print $5}' | grep -c connmand`
+else
+ count=`ps -eo comm | cut -d " " -f 1 | grep -c connmand`
+fi
+
+if [ $count -ne 1 ]; then
+ Target_Info "connmand has issue when running in background, Pls, check the output of ps"
+ ${PS} | grep connmand
+ exit 1
+fi
+
+# Check if there is always only one connmand running in background
+if [ connmand > /dev/null 2>&1 ]; then
+ Target_Info "connmand command run without problem"
+
+ if [ $RET -eq 0 ]; then
+ count=`ps | awk '{print $5}' | grep -c connmand`
+ else
+ count=`ps -eo comm | cut -d " " -f 1 | grep -c connmand`
+ fi
+
+ if [ $count -ne 1 ]; then
+ Target_Info "There are more than one connmand running in background, Pls, check the output of ps"
+ ${PS} | grep connmand
+ exit 1
+ else
+ Target_Info "There is always one connmand running in background, test pass"
+ exit 0
+ fi
+else
+ Target_Err connmand
+ exit 1
+fi
+
+exit 0
diff --git a/scripts/qemuimage-tests/tools/dmesg.sh b/scripts/qemuimage-tests/tools/dmesg.sh
new file mode 100644
index 000000000..66c022343
--- /dev/null
+++ b/scripts/qemuimage-tests/tools/dmesg.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Dmesg test script running in QEMU
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+which dmesg
+if [ $? -ne 0 ]; then
+ echo "QEMU: No dmesg command found"
+ exit 1
+fi
+
+dmesg | grep -iq "error"
+if [ $? -eq 0 ]; then
+ echo "QEMU: There is some error log in dmesg:"
+ echo "QEMU: ##### Error Log ######"
+ dmesg | grep -i "error"
+ echo "QEMU: ##### End ######"
+ exit 1
+else
+ echo "QEMU: No error log in dmesg"
+ exit 0
+fi
diff --git a/scripts/qemuimage-tests/tools/rpm_test.sh b/scripts/qemuimage-tests/tools/rpm_test.sh
new file mode 100644
index 000000000..6e6f9112c
--- /dev/null
+++ b/scripts/qemuimage-tests/tools/rpm_test.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# rpm test script running in target
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+Target_Info()
+{
+ echo -e "\tTARGET: $*"
+}
+
+Target_Err()
+{
+ echo -e "\tTARGET: rpm command has issue when running, Pls. check the error log"
+ echo -e "\tTARGET: ##### Error Log #####"
+ $1
+ echo -e "\tTARGET: ##### End #####"
+}
+
+which rpm
+if [ $? -ne 0 ]; then
+ Target_Info "No rpm command found"
+ exit 1
+fi
+
+if rpm --version > /dev/null 2>&1; then
+ Target_Info "rpm command run without problem"
+else
+ Target_Err rpm
+ exit 1
+fi
+
+# run rpm with specific command passed to rpm_test.sh
+rpm $* > /dev/null 2>&1
+
+if [ $? -eq 0 ]; then
+ Target_Info "rpm $* work without problem"
+ exit 0
+else
+ Target_Err rpm $*
+ exit 1
+fi
diff --git a/scripts/qemuimage-tests/tools/zypper_test.sh b/scripts/qemuimage-tests/tools/zypper_test.sh
new file mode 100644
index 000000000..5e8e7aaac
--- /dev/null
+++ b/scripts/qemuimage-tests/tools/zypper_test.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# zypper test script running in target
+#
+# Author: Jiajun Xu <jiajun.xu@intel.com>
+#
+# This file is licensed under the GNU General Public License,
+# Version 2.
+#
+
+Target_Info()
+{
+ echo -e "\tTARGET: $*"
+}
+
+Target_Err()
+{
+ echo -e "\tTARGET: zypper command has issue when running, Pls. check the error log"
+ echo -e "\tTARGET: ##### Error Log #####"
+ $1
+ echo -e "\tTARGET: ##### End #####"
+}
+
+which zypper
+if [ $? -ne 0 ]; then
+ Target_Info "No zypper command found"
+ exit 1
+fi
+
+if zypper --version > /dev/null 2>&1; then
+ Target_Info "zypper command run without problem"
+else
+ Target_Err zypper
+ exit 1
+fi
+
+# run zypper with specific command passed to zypper_test.sh
+zypper $* > /dev/null 2>&1
+
+if [ $? -eq 0 ]; then
+ Target_Info "zypper $* work without problem"
+ exit 0
+else
+ Target_Err zypper $*
+ exit 1
+fi
diff --git a/scripts/rootfs_rpm-extract-postinst.awk b/scripts/rootfs_rpm-extract-postinst.awk
new file mode 100644
index 000000000..8f2836b32
--- /dev/null
+++ b/scripts/rootfs_rpm-extract-postinst.awk
@@ -0,0 +1,11 @@
+/Name:.*/ {
+ package = substr($0, 7)
+ next
+}
+/postinstall.*scriptlet .*/ {
+ next
+}
+{
+ print $0 >> ENVIRON["D"] "/etc/rpm-postinsts/" package ".sh"
+}
+
diff --git a/scripts/rpm-createsolvedb.py b/scripts/rpm-createsolvedb.py
new file mode 100755
index 000000000..0d5f2198a
--- /dev/null
+++ b/scripts/rpm-createsolvedb.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+#
+# This script generates a solution database for a directory containing rpm packages
+# but tries to be efficient about this, only doing so when the packages have changed
+# in some way.
+#
+# It is assumed something already went through and removed all the solvedb.done stamp files
+# in advance.
+#
+# First argument - the rpm binary to use
+# Subsequent arguments - paths to process solution databases for
+#
+
+import sys, os
+import hashlib
+import stat
+
+if len(sys.argv) < 2:
+    print("Error, rpm command not specified")
+    sys.exit(1)
+
+if len(sys.argv) < 3:
+    print("Error, no paths specified")
+    sys.exit(1)
+
+paths = sys.argv[2:]
+
+for path in paths:
+ if os.path.exists(path + "/solvedb.done"):
+ continue
+ data = ""
+ manifest = []
+ for root, dirs, files in os.walk(path):
+ for file in files:
+ f = os.path.join(root, file)
+ if f.startswith(path + "/" + "solvedb"):
+ continue
+ data = data + str(os.stat(f)[stat.ST_MTIME])
+ manifest.append(f)
+ checksum = hashlib.md5(data).hexdigest()
+
+ if os.path.exists(path + "/solvedb.checksum") and open(path + "/solvedb.checksum", "r").read() == checksum:
+ open(path + "/solvedb.done", "w")
+ continue
+
+ if os.path.exists(path + "/solvedb"):
+ os.system("rm -rf %s" % (path + "/solvedb"))
+ os.mkdir(path + "/solvedb")
+ m = open(path + "/solvedb/manifest", "w")
+ m.write("# Dynamically generated solve manifest\n")
+ for f in manifest:
+ m.write(f + "\n")
+ m.close()
+
+ cmd = sys.argv[1] + ' -i --replacepkgs --replacefiles --oldpackage -D "_dbpath ' + path + '/solvedb" --justdb \
+ --noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
+ --ignoresize --nosignature --nodigest -D "__dbi_txn create nofsync" \
+ ' + path + '/solvedb/manifest'
+ os.system(cmd)
+
+ open(path + "/solvedb.checksum", "w").write(checksum)
+ open(path + "/solvedb.done", "w")
+
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
new file mode 100755
index 000000000..5df8c0f70
--- /dev/null
+++ b/scripts/rpm2cpio.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+
+# This comes from the RPM5 5.4.0 distribution.
+
+pkg=$1
+if [ "$pkg" = "" -o ! -e "$pkg" ]; then
+ echo "no package supplied" 1>&2
+ exit 1
+fi
+
+leadsize=96
+o=`expr $leadsize + 8`
+set `od -j $o -N 8 -t u1 $pkg`
+il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
+dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
+# echo "sig il: $il dl: $dl"
+
+sigsize=`expr 8 + 16 \* $il + $dl`
+o=`expr $o + $sigsize + \( 8 - \( $sigsize \% 8 \) \) \% 8 + 8`
+set `od -j $o -N 8 -t u1 $pkg`
+il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
+dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
+# echo "hdr il: $il dl: $dl"
+
+hdrsize=`expr 8 + 16 \* $il + $dl`
+o=`expr $o + $hdrsize`
+EXTRACTOR="dd if=$pkg ibs=$o skip=1"
+
+COMPRESSION=`($EXTRACTOR |file -) 2>/dev/null`
+if echo $COMPRESSION |grep -iq gzip; then
+ DECOMPRESSOR=gunzip
+elif echo $COMPRESSION |grep -iq bzip2; then
+ DECOMPRESSOR=bunzip2
+elif echo $COMPRESSION |grep -iq xz; then
+ DECOMPRESSOR=unxz
+elif echo $COMPRESSION |grep -iq cpio; then
+ DECOMPRESSOR=cat
+else
+ # Most versions of file don't support LZMA, therefore we assume
+ # anything not detected is LZMA
+ DECOMPRESSOR=`which unlzma 2>/dev/null`
+ case "$DECOMPRESSOR" in
+ /* ) ;;
+ * ) DECOMPRESSOR=`which lzmash 2>/dev/null`
+ case "$DECOMPRESSOR" in
+ /* ) DECOMPRESSOR="lzmash -d -c" ;;
+ * ) DECOMPRESSOR=cat ;;
+ esac
+ ;;
+ esac
+fi
+
+$EXTRACTOR 2>/dev/null | $DECOMPRESSOR
diff --git a/scripts/runqemu b/scripts/runqemu
index 7d5107f14..fc7363fdd 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -1,8 +1,8 @@
#!/bin/bash
-
-# Handle Poky <-> QEmu interface voodoo
#
-# Copyright (C) 2006-2007 OpenedHand Ltd.
+# Handle running OE images standalone with QEMU
+#
+# Copyright (C) 2006-2011 Linux Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -17,133 +17,384 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-if [ "x$BUILDDIR" = "x" ]; then
- echo "You need to source poky-init-build-env before running this script"
+usage() {
+ MYNAME=`basename $0`
+ echo ""
+ echo "Usage: you can run this script with any valid combination"
+ echo "of the following options (in any order):"
+ echo " QEMUARCH - the qemu machine architecture to use"
+ echo " KERNEL - the kernel image file to use"
+ echo " ROOTFS - the rootfs image file or nfsroot directory to use"
+ echo " MACHINE=xyz - the machine name (optional, autodetected from KERNEL filename if unspecified)"
+ echo " Simplified QEMU command-line options can be passed with:"
+ echo " nographic - disables video console"
+ echo " serial - enables a serial console on /dev/ttyS0"
+ echo " kvm - enables KVM when running qemux86/qemux86-64 (VT-capable CPU required)"
+ echo " qemuparams=\"xyz\" - specify custom parameters to QEMU"
+ echo " bootparams=\"xyz\" - specify custom kernel parameters during boot"
+ echo ""
+ echo "Examples:"
+ echo " $MYNAME qemuarm"
+ echo " $MYNAME qemux86-64 core-image-sato ext3"
+ echo " $MYNAME path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial"
+ echo " $MYNAME qemux86 qemuparams=\"-m 256\""
+ echo " $MYNAME qemux86 bootparams=\"psplash=false\""
exit 1
+}
+
+if [ "x$1" = "x" ]; then
+ usage
fi
-INTERNAL_SCRIPT=`which poky-qemu-internal`
+error() {
+ echo "Error: "$*
+ usage
+}
+
+MACHINE=${MACHINE:=""}
+KERNEL=""
+FSTYPE=""
+ROOTFS=""
+LAZY_ROOTFS=""
+SCRIPT_QEMU_OPT=""
+SCRIPT_QEMU_EXTRA_OPT=""
+SCRIPT_KERNEL_OPT=""
-if [ "x$1" = "x" ]; then
- echo
- echo "Run as $0 MACHINE IMAGETYPE ZIMAGE IMAGEFILE"
- echo "where:"
- echo " MACHINE - the machine to emulate (qemuarm, qemux86)"
- echo " IMAGETYPE - the type of image to run (ext2, nfs) (default: ext2)"
- echo " ZIMAGE - the kernel to use (optional)"
- echo " IMAGEFILE - the image file/location to use (optional)"
- exit 1
-else
- MACHINE=$1
+# Determine whether the file is a kernel or QEMU image, and set the
+# appropriate variables
+process_filename() {
+ filename=$1
+
+ # Extract the filename extension
+ EXT=`echo $filename | awk -F . '{ print \$NF }'`
+ case /$EXT/ in
+ /bin/)
+ # A file ending in .bin is a kernel
+ [ -z "$KERNEL" ] && KERNEL=$filename || \
+ error "conflicting KERNEL args [$KERNEL] and [$filename]"
+ ;;
+ /ext[234]/|/jffs2/|/btrfs/)
+ # A file ending in a supported fs type is a rootfs image
+ if [ -z "$FSTYPE" -o "$FSTYPE" = "$EXT" ]; then
+ FSTYPE=$EXT
+ ROOTFS=$filename
+ else
+ error "conflicting FSTYPE types [$FSTYPE] and [$EXT]"
+ fi
+ ;;
+ *)
+ error "unknown file arg [$filename]"
+ ;;
+ esac
+}
+
+# Parse command line args without requiring specific ordering. It's a
+# bit more complex, but offers a great user experience.
+KVM_ENABLED="no"
+while true; do
+ arg=${1}
+ case "$arg" in
+ "qemux86" | "qemux86-64" | "qemuarm" | "qemumips" | "qemuppc")
+ [ -z "$MACHINE" ] && MACHINE=$arg || \
+ error "conflicting MACHINE types [$MACHINE] and [$arg]"
+ ;;
+ "ext2" | "ext3" | "jffs2" | "nfs" | "btrfs")
+ [ -z "$FSTYPE" -o "$FSTYPE" = "$arg" ] && FSTYPE=$arg || \
+ error "conflicting FSTYPE types [$FSTYPE] and [$arg]"
+ ;;
+ *-image*)
+ [ -z "$ROOTFS" ] || \
+ error "conflicting ROOTFS args [$ROOTFS] and [$arg]"
+ if [ -f "$arg" ]; then
+ process_filename $arg
+ elif [ -d "$arg" ]; then
+ # Handle the case where the nfsroot dir has -image-
+ # in the pathname
+ echo "Assuming $arg is an nfs rootfs"
+ FSTYPE=nfs
+ ROOTFS=$arg
+ else
+ ROOTFS=$arg
+ LAZY_ROOTFS="true"
+ fi
+ ;;
+ "nographic")
+ SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -nographic"
+ SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0"
+ ;;
+ "serial")
+ SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -serial stdio"
+ SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT console=ttyS0"
+ ;;
+ "qemuparams="*)
+ SCRIPT_QEMU_EXTRA_OPT="${arg##qemuparams=}"
+
+ # Warn user if they try to specify serial or kvm options
+ # to use simplified options instead
+ serial_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-serial\)'`
+ kvm_option=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-enable-kvm\)'`
+ [ ! -z "$serial_option" -o ! -z "$kvm_option" ] && \
+ error "Please use simplified serial or kvm options instead"
+ ;;
+ "bootparams="*)
+ SCRIPT_KERNEL_OPT="$SCRIPT_KERNEL_OPT ${arg##bootparams=}"
+ ;;
+ "audio")
+ if [ "x$MACHINE" = "xqemux86" -o "x$MACHINE" = "xqemux86-64" ]; then
+ echo "Enabling audio in qemu."
+ echo "Please install snd_intel8x0 or snd_ens1370 driver in linux guest."
+ QEMU_AUDIO_DRV="alsa"
+ SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -soundhw ac97,es1370"
+ fi
+ ;;
+ "kvm")
+ KVM_ENABLED="yes"
+ KVM_CAPABLE=`grep -q 'vmx\|smx' /proc/cpuinfo && echo 1`
+ ;;
+ "") break ;;
+ *)
+ # A directory name is an nfs rootfs
+ if [ -d "$arg" ]; then
+ echo "Assuming $arg is an nfs rootfs"
+ if [ -z "$FSTYPE" -o "$FSTYPE" = "nfs" ]; then
+ FSTYPE=nfs
+ else
+ error "conflicting FSTYPE types [$arg] and nfs"
+ fi
+
+ if [ -z "$ROOTFS" ]; then
+ ROOTFS=$arg
+ else
+ error "conflicting ROOTFS args [$ROOTFS] and [$arg]"
+ fi
+ elif [ -f "$arg" ]; then
+ process_filename $arg
+ else
+ error "unable to classify arg [$arg]"
+ fi
+ ;;
+ esac
shift
+done
+
+if [ ! -c /dev/net/tun ] ; then
+ echo "TUN control device /dev/net/tun is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)"
+ exit 1
+elif [ ! -w /dev/net/tun ] ; then
+ echo "TUN control device /dev/net/tun is not writable, please fix (e.g. sudo chmod 666 /dev/net/tun)"
+ exit 1
fi
-if [ "x$1" != "x" ]; then
- TYPE=$1
- shift
-else
- TYPE="ext3"
- if [ "$MACHINE" = "akita" ]; then
- TYPE="jffs2"
+YOCTO_KVM_WIKI="https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
+# Detect KVM configuration
+if [ "x$KVM_ENABLED" = "xyes" ]; then
+ if [ -z "$KVM_CAPABLE" ]; then
+ echo "You are trying to enable KVM on a cpu without VT support."
+ echo "Remove kvm from the command-line, or refer"
+ echo "$YOCTO_KVM_WIKI";
+ exit 1;
+ fi
+ if [ "x$MACHINE" != "xqemux86" -a "x$MACHINE" != "xqemux86-64" ]; then
+ echo "KVM only support x86 & x86-64. Remove kvm from the command-line";
+ exit 1;
fi
- if [ "$MACHINE" = "nokia800" ]; then
- TYPE="jffs2"
+ if [ ! -e /dev/kvm ]; then
+ echo "Missing KVM device. Have you inserted kvm modules?"
+ echo "For further help see"
+ echo "$YOCTO_KVM_WIKI";
+ exit 1;
fi
- if [ "$MACHINE" = "spitz" ]; then
- TYPE="ext3"
+ if 9<>/dev/kvm ; then
+ SCRIPT_QEMU_OPT="$SCRIPT_QEMU_OPT -enable-kvm"
+ else
+ echo "You have no rights on /dev/kvm."
+ echo "Please change the ownership of this file as described at"
+ echo "$YOCTO_KVM_WIKI";
+ exit 1;
fi
fi
-if [ "x$1" != "x" ]; then
- ZIMAGE=$1
- shift
+# Report errors for missing combinations of options
+if [ -z "$MACHINE" -a -z "$KERNEL" ]; then
+ error "you must specify at least a MACHINE or KERNEL argument"
fi
-
-if [ "x$1" != "x" ]; then
- HDIMAGE=$1
- shift
+if [ "$FSTYPE" = "nfs" -a -z "$ROOTFS" ]; then
+ error "NFS booting without an explicit ROOTFS path is not yet supported"
fi
-if [ "$MACHINE" = "qemuarm" -o "$MACHINE" = "spitz" -o "$MACHINE" = "borzoi" -o "$MACHINE" = "akita" -o "$MACHINE" = "nokia800" ]; then
- if [ "x$ZIMAGE" = "x" ]; then
- ZIMAGE=$BUILDDIR/tmp/deploy/images/zImage-$MACHINE.bin
+if [ -z "$MACHINE" ]; then
+ MACHINE=`basename $KERNEL | sed 's/.*-\(qemux86-64\|qemux86\|qemuarm\|qemumips\|qemuppc\).*/\1/'`
+ if [ -z "$MACHINE" ]; then
+ error "Unable to set MACHINE from kernel filename [$KERNEL]"
fi
- CROSSPATH=$BUILDDIR/tmp/cross/arm-poky-linux-gnueabi/bin
+ echo "Set MACHINE to [$MACHINE] based on kernel [$KERNEL]"
fi
+machine2=`echo $MACHINE | tr 'a-z' 'A-Z' | sed 's/-/_/'`
+# MACHINE is now set for all cases
+
+# Defaults used when these vars need to be inferred
+QEMUX86_DEFAULT_KERNEL=bzImage-qemux86.bin
+QEMUX86_DEFAULT_FSTYPE=ext3
+
+QEMUX86_64_DEFAULT_KERNEL=bzImage-qemux86-64.bin
+QEMUX86_64_DEFAULT_FSTYPE=ext3
+
+QEMUARM_DEFAULT_KERNEL=zImage-qemuarm.bin
+QEMUARM_DEFAULT_FSTYPE=ext3
+
+QEMUMIPS_DEFAULT_KERNEL=vmlinux-qemumips.bin
+QEMUMIPS_DEFAULT_FSTYPE=ext3
+
+QEMUPPC_DEFAULT_KERNEL=vmlinux-qemuppc.bin
+QEMUPPC_DEFAULT_FSTYPE=ext3
+
+AKITA_DEFAULT_KERNEL=zImage-akita.bin
+AKITA_DEFAULT_FSTYPE=jffs2
+
+SPITZ_DEFAULT_KERNEL=zImage-spitz.bin
+SPITZ_DEFAULT_FSTYPE=ext3
-function findimage {
+setup_tmpdir() {
+ if [ -z "$OE_TMPDIR" ]; then
+ # Try to get OE_TMPDIR from bitbake
+ type -P bitbake &>/dev/null || {
+ echo "In order for this script to dynamically infer paths";
+ echo "to kernels or filesystem images, you either need";
+ echo "bitbake in your PATH or to source oe-init-build-env";
+ echo "before running this script" >&2;
+ exit 1; }
+
+ # We have bitbake in PATH, get OE_TMPDIR from bitbake
+ OE_TMPDIR=`bitbake -e | grep ^TMPDIR=\" | cut -d '=' -f2 | cut -d '"' -f2`
+ if [ -z "$OE_TMPDIR" ]; then
+ echo "Error: this script needs to be run from your build directory,"
+ echo "or you need to explicitly set OE_TMPDIR in your environment"
+ exit 1
+ fi
+ fi
+}
+
+setup_sysroot() {
+ # Toolchain installs set up $OECORE_NATIVE_SYSROOT in their
+ # environment script. If that variable isn't set, we're
+ # either in an in-tree build scenario or the environment
+ # script wasn't source'd.
+ if [ -z "$OECORE_NATIVE_SYSROOT" ]; then
+ setup_tmpdir
+ BUILD_ARCH=`uname -m`
+ BUILD_OS=`uname | tr '[A-Z]' '[a-z]'`
+ BUILD_SYS="$BUILD_ARCH-$BUILD_OS"
+
+ OECORE_NATIVE_SYSROOT=$OE_TMPDIR/sysroots/$BUILD_SYS
+ fi
+}
+
+# Locate a rootfs image to boot which matches our expected
+# machine and fstype.
+findimage() {
where=$1
machine=$2
extension=$3
- names=$4
- for name in $names;
- do
- fullname=$where/$name-$machine.$extension
- if [ -e "$fullname" ]; then
- HDIMAGE=$fullname
+
+ # Sort rootfs candidates by modification time - the most
+ # recently created one is the one we most likely want to boot.
+ filenames=`ls -t $where/*-image*$machine.$extension 2>/dev/null | xargs`
+ for name in $filenames; do
+ case $name in
+ *core-image-sato* | \
+ *core-image-lsb* | \
+ *core-image-basic* | \
+ *core-image-minimal* )
+ ROOTFS=$name
return
- fi
- done
- echo "Couldn't find image in $where. Attempted image names were:"
- for name in $names;
- do
- echo $name-$machine.$extension
+ ;;
+ esac
done
+
+ echo "Couldn't find a $machine rootfs image in $where."
exit 1
}
-if [ "$MACHINE" = "qemuarm" ]; then
- if [ "$TYPE" = "ext3" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- T=$BUILDDIR/tmp/deploy/images
- findimage $T qemuarm ext3 "poky-image-sdk poky-image-sato poky-image-minimal"
- fi
+if [ -e "$ROOTFS" -a -z "$FSTYPE" ]; then
+ # Extract the filename extension
+ EXT=`echo $ROOTFS | awk -F . '{ print \$NF }'`
+ if [ "x$EXT" = "xext2" -o "x$EXT" = "xext3" -o \
+ "x$EXT" = "xjffs2" -o "x$EXT" = "xbtrfs" ]; then
+ FSTYPE=$EXT
+ else
+ echo "Note: Unable to determine filesystem extension for $ROOTFS"
+ echo "We will use the default FSTYPE for $MACHINE"
+ # ...which is done further below...
fi
fi
-if [ "$MACHINE" = "spitz" ]; then
- if [ "$TYPE" = "ext3" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- HDIMAGE=$BUILDDIR/tmp/deploy/images/poky-image-sato-spitz.ext3
- fi
+if [ -z "$KERNEL" ]; then
+ setup_tmpdir
+ eval kernel_file=\$${machine2}_DEFAULT_KERNEL
+ KERNEL=$OE_TMPDIR/deploy/images/$kernel_file
+
+ if [ -z "$KERNEL" ]; then
+ error "Unable to determine default kernel for MACHINE [$MACHINE]"
fi
fi
+# KERNEL is now set for all cases
-if [ "$MACHINE" = "akita" ]; then
- if [ "$TYPE" = "jffs2" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- HDIMAGE=$BUILDDIR/tmp/deploy/images/poky-image-sato-akita.jffs2
- fi
+if [ -z "$FSTYPE" ]; then
+ eval FSTYPE=\$${machine2}_DEFAULT_FSTYPE
+
+ if [ -z "$FSTYPE" ]; then
+ error "Unable to determine default fstype for MACHINE [$MACHINE]"
fi
fi
-if [ "$MACHINE" = "nokia800" ]; then
- if [ "$TYPE" = "jffs2" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- HDIMAGE=$BUILDDIR/tmp/deploy/images/poky-image-sato-nokia800.jffs2
- fi
- fi
+# FSTYPE is now set for all cases
+
+# Handle cases where a ROOTFS type is given instead of a filename, e.g.
+# core-image-sato
+if [ "$LAZY_ROOTFS" = "true" ]; then
+ setup_tmpdir
+ echo "Assuming $ROOTFS really means $OE_TMPDIR/deploy/images/$ROOTFS-$MACHINE.$FSTYPE"
+ ROOTFS=$OE_TMPDIR/deploy/images/$ROOTFS-$MACHINE.$FSTYPE
fi
+if [ -z "$ROOTFS" ]; then
+ setup_tmpdir
+ T=$OE_TMPDIR/deploy/images
+ eval rootfs_list=\$${machine2}_DEFAULT_ROOTFS
+ findimage $T $MACHINE $FSTYPE
-if [ "$MACHINE" = "qemux86" ]; then
- if [ "x$ZIMAGE" = "x" ]; then
- ZIMAGE=$BUILDDIR/tmp/deploy/images/bzImage-$MACHINE.bin
+ if [ -z "$ROOTFS" ]; then
+ error "Unable to determine default rootfs for MACHINE [$MACHINE]"
fi
- if [ "$TYPE" = "ext3" ]; then
- if [ "x$HDIMAGE" = "x" ]; then
- T=$BUILDDIR/tmp/deploy/images
- findimage $T qemux86 ext3 "poky-image-sdk poky-image-sato poky-image-minimal moblin-image-netbook"
- fi
- fi
- CROSSPATH=$BUILDDIR/tmp/cross/i586-poky-linux/bin
fi
+# ROOTFS is now set for all cases
+
+echo ""
+echo "Continuing with the following parameters:"
+echo "KERNEL: [$KERNEL]"
+echo "ROOTFS: [$ROOTFS]"
+echo "FSTYPE: [$FSTYPE]"
+
+setup_sysroot
+# OECORE_NATIVE_SYSROOT is now set for all cases
-if [ ! -e $CROSSPATH/cc ]; then
- ln -s $CROSSPATH/gcc $CROSSPATH/cc
+# We can't run without a libGL.so
+libgl='no'
+
+[ -e /usr/lib/libGL.so -a -e /usr/lib/libGLU.so ] && libgl='yes'
+[ -e /usr/lib64/libGL.so -a -e /usr/lib64/libGLU.so ] && libgl='yes'
+for d in /usr/lib/*-linux-gnu; do [ -e "$d/libGL.so" -a -e "$d/libGLU.so" ] && libgl='yes'; done
+
+if [ "$libgl" != 'yes' ]; then
+ echo "You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.
+ Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.
+ Fedora package names are: mesa-libGL mesa-libGLU."
+ exit 1;
fi
-CROSSPATH=$BUILDDIR/tmp/staging/$BUILD_SYS/usr/bin:$CROSSPATH:$BUILDDIR/tmp/cross/bin
+INTERNAL_SCRIPT="$0-internal"
+if [ ! -f "$INTERNAL_SCRIPT" -o ! -r "$INTERNAL_SCRIPT" ]; then
+INTERNAL_SCRIPT=`which runqemu-internal`
+fi
. $INTERNAL_SCRIPT
diff --git a/scripts/poky-addptable2image b/scripts/runqemu-addptable2image
index f0195ad8a..f0195ad8a 100755
--- a/scripts/poky-addptable2image
+++ b/scripts/runqemu-addptable2image
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
new file mode 100755
index 000000000..f8213ba4e
--- /dev/null
+++ b/scripts/runqemu-export-rootfs
@@ -0,0 +1,191 @@
+#!/bin/bash
+#
+# Copyright (c) 2005-2009 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+usage() {
+ echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
+}
+
+if [ $# != 2 ]; then
+ usage
+ exit 1
+fi
+
+if [[ "$1" != "start" && "$1" != "stop" && "$1" != "restart" ]]; then
+ echo "Unknown command '$1'"
+ usage
+ exit 1
+fi
+
+if [ ! -d "$2" ]; then
+ echo "Error: '$2' does not exist"
+ usage
+ exit 1
+fi
+# Ensure the nfs-export-dir is an absolute path
+NFS_EXPORT_DIR=$(cd "$2" && pwd)
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find the oe-find-native-sysroot script"
+ echo "Did you forget to source your build environment setup script?"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT
+
+if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/sbin/rpc.mountd" ]; then
+ echo "Error: Unable to find rpc.mountd binary in $OECORE_NATIVE_SYSROOT/usr/sbin/"
+
+ if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
+ echo "Have you run 'bitbake meta-ide-support'?"
+ else
+ echo "This shouldn't happen - something is missing from your toolchain installation"
+ fi
+ exit 1
+fi
+
+if [ ! -d ~/.runqemu-sdk ]; then
+ mkdir -p ~/.runqemu-sdk
+fi
+
+NFS_INSTANCE=${NFS_INSTANCE:=0}
+EXPORTS=~/.runqemu-sdk/exports$NFS_INSTANCE
+RMTAB=~/.runqemu-sdk/rmtab$NFS_INSTANCE
+NFSPID=~/.runqemu-sdk/nfs$NFS_INSTANCE.pid
+MOUNTPID=~/.runqemu-sdk/mount$NFS_INSTANCE.pid
+
+PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
+PSEUDO_LOCALSTATEDIR="$NFS_EXPORT_DIR/var/pseudo"
+export PSEUDO_LOCALSTATEDIR
+
+if [ ! -d "$PSEUDO_LOCALSTATEDIR" ]; then
+ echo "Error: $PSEUDO_LOCALSTATEDIR does not exist."
+ echo "Did you create the export directory using runqemu-extract-sdk?"
+ exit 1
+fi
+
+# rpc.mountd RPC port
+NFS_MOUNTPROG=$[ 21111 + $NFS_INSTANCE ]
+# rpc.nfsd RPC port
+NFS_NFSPROG=$[ 11111 + $NFS_INSTANCE ]
+# NFS port number
+NFS_PORT=$[ 3049 + $NFS_INSTANCE ]
+# mountd port number
+MOUNT_PORT=$[ 3048 + $NFS_INSTANCE ]
+
+## For debugging you would additionally add
+## --debug all
+MOUNTD_OPTS="--allow-non-root --mount-pid $MOUNTPID -f $EXPORTS --rmtab $RMTAB --prog $NFS_MOUNTPROG -r -P $MOUNT_PORT"
+NFSD_OPTS="--allow-non-root --nfs-pid $NFSPID -f $EXPORTS --prog $NFS_NFSPROG -P $NFS_PORT -r"
+
+# Setup the exports file
+if [ "$1" = "start" ]; then
+ echo "Creating exports file..."
+ echo "$NFS_EXPORT_DIR (rw,async,no_root_squash,no_all_squash,insecure)" > $EXPORTS
+fi
+
+# See how we were called.
+case "$1" in
+ start)
+ echo "Starting User Mode rpc.mountd"
+ echo " $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/sbin/rpc.mountd $MOUNTD_OPTS"
+ $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/sbin/rpc.mountd $MOUNTD_OPTS
+ if [ ! $? = 0 ]; then
+ echo "====================="
+ echo "Error starting MOUNTD"
+ echo "====================="
+ ps -ef | grep -v grep | grep rpcbind 2>&1 > /dev/null
+ if [ $? = 0 ] ; then
+ echo " If you see an error above that says:"
+ echo " RPC: Authentication error; why = Client credential too weak"
+ echo " You need to change the startup of rpcbind"
+ echo " on your host by doing the following as root:"
+ echo "==============================================="
+ echo " According to /etc/sysconfig/rpcbind, then "
+ echo " echo RPCBIND_ARGS=-i >> /etc/sysconfig/rpcbind"
+ echo " or"
+ echo " echo RPCBIND_OPTIONS=-i >> /etc/sysconfig/rpcbind"
+ echo " /etc/init.d/rpcbind restart"
+ echo "==============================================="
+ echo "For Ubuntu 11.10 hosts"
+ echo "Add OPTIONS=\"-i -w\" to /etc/default/rpcbind"
+ echo "sudo service portmap restart"
+ fi
+ exit 1
+ fi
+ echo "Starting User Mode nfsd"
+ echo " $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/sbin/rpc.nfsd $NFSD_OPTS"
+ $PSEUDO $PSEUDO_OPTS $OECORE_NATIVE_SYSROOT/usr/sbin/rpc.nfsd $NFSD_OPTS
+ if [ ! $? = 0 ]; then
+ echo "Error starting nfsd"
+ exit 1
+ fi
+ # Check to make sure everything started ok.
+ if [ ! -f $MOUNTPID ]; then
+ echo "rpc.mountd did not start correctly"
+ exit 1
+ fi
+ if [ ! -f $NFSPID ]; then
+ echo "rpc.nfsd did not start correctly"
+ exit 1
+ fi
+ ps -fp `cat $MOUNTPID` > /dev/null 2> /dev/null
+ if [ ! $? = 0 ]; then
+ echo "rpc.mountd did not start correctly"
+ exit 1
+ fi
+ ps -fp `cat $NFSPID` > /dev/null 2> /dev/null
+ if [ ! $? = 0 ]; then
+ echo "rpc.nfsd did not start correctly"
+ exit 1
+ fi
+ echo " "
+ echo "On your target please remember to add the following options for NFS"
+ echo "nfsroot=IP_ADDRESS:$NFS_EXPORT_DIR,nfsvers=2,mountprog=$NFS_MOUNTPROG,nfsprog=$NFS_NFSPROG,udp"
+ ;;
+ stop)
+ if [ -f "$MOUNTPID" ]; then
+ echo "Stopping rpc.mountd"
+ kill `cat $MOUNTPID`
+ rm -f $MOUNTPID
+ else
+ echo "No PID file, not stopping rpc.mountd"
+ fi
+ if [ -f "$NFSPID" ]; then
+ echo "Stopping rpc.nfsd"
+ kill `cat $NFSPID`
+ rm -f $NFSPID
+ else
+ echo "No PID file, not stopping rpc.nfsd"
+ fi
+ if [ -f "$EXPORTS" ]; then
+ echo "Removing exports file"
+ rm -f $EXPORTS
+ fi
+ ;;
+ restart)
+ $0 stop $NFS_EXPORT_DIR
+ $0 start $NFS_EXPORT_DIR
+ if [ ! $? = 0 ]; then
+ exit 1
+ fi
+ ;;
+ *)
+ echo "$0 {start|stop|restart} <nfs-export-dir>"
+ ;;
+esac
+
+exit 0
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
new file mode 100755
index 000000000..4b5247597
--- /dev/null
+++ b/scripts/runqemu-extract-sdk
@@ -0,0 +1,100 @@
+#!/bin/bash
+#
+# This utility extracts an SDK image tarball using pseudo, and stores
+# the pseudo database in var/pseudo within the rootfs. If you want to
+# boot QEMU using an nfsroot, you *must* use this script to create the
+# rootfs to ensure it is done correctly with pseudo.
+#
+# Copyright (c) 2010 Intel Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+function usage() {
+ echo "Usage: $0 <image-tarball> <extract-dir>"
+}
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
+if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
+ echo "Error: Unable to find the oe-find-native-sysroot script"
+ echo "Did you forget to source your build system environment setup script?"
+ exit 1
+fi
+. $SYSROOT_SETUP_SCRIPT
+PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
+
+ROOTFS_TARBALL=$1
+SDK_ROOTFS_DIR=$2
+
+if [ ! -e "$ROOTFS_TARBALL" ]; then
+ echo "Error: sdk tarball '$ROOTFS_TARBALL' does not exist"
+ usage
+ exit 1
+fi
+
+# Convert SDK_ROOTFS_DIR to a full pathname
+if [[ ${SDK_ROOTFS_DIR:0:1} != "/" ]]; then
+ SDK_ROOTFS_DIR=$(pwd)/$SDK_ROOTFS_DIR
+fi
+
+TAR_OPTS=""
+if [[ "$ROOTFS_TARBALL" =~ tar\.bz2$ ]]; then
+ TAR_OPTS="-xjf"
+fi
+if [[ "$ROOTFS_TARBALL" =~ tar\.gz$ ]]; then
+ TAR_OPTS="-xzf"
+fi
+if [[ "$ROOTFS_TARBALL" =~ \.tar$ ]]; then
+ TAR_OPTS="-xf"
+fi
+if [ -z "$TAR_OPTS" ]; then
+ echo "Error: Unable to determine sdk tarball format"
+ echo "Accepted types: .tar / .tar.gz / .tar.bz2"
+ exit 1
+fi
+
+if [ ! -d "$SDK_ROOTFS_DIR" ]; then
+ echo "Creating directory $SDK_ROOTFS_DIR"
+ mkdir -p "$SDK_ROOTFS_DIR"
+fi
+
+if [ -e "$SDK_ROOTFS_DIR/var/pseudo" ]; then
+ echo "Error: $SDK_ROOTFS_DIR/var/pseudo already exists!"
+ echo "Please delete the entire rootfs tree manually if this is really what you want"
+ exit 1
+fi
+
+mkdir -p "$SDK_ROOTFS_DIR/var/pseudo"
+touch "$SDK_ROOTFS_DIR/var/pseudo/pseudo.pid"
+PSEUDO_LOCALSTATEDIR="$SDK_ROOTFS_DIR/var/pseudo"
+export PSEUDO_LOCALSTATEDIR
+
+echo "Extracting rootfs tarball using pseudo..."
+echo "$PSEUDO $PSEUDO_OPTS tar -C \"$SDK_ROOTFS_DIR\" $TAR_OPTS \"$ROOTFS_TARBALL\""
+$PSEUDO $PSEUDO_OPTS tar -C "$SDK_ROOTFS_DIR" $TAR_OPTS "$ROOTFS_TARBALL"
+
+DIRCHECK=`ls -l "$SDK_ROOTFS_DIR" | wc -l`
+if [ "$DIRCHECK" -lt 5 ]; then
+ echo "Warning: I don't see many files in $SDK_ROOTFS_DIR"
+ echo "Please double-check the extraction worked as intended"
+ exit 0
+fi
+
+echo "SDK image successfully extracted to $SDK_ROOTFS_DIR"
+
+exit 0
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
new file mode 100755
index 000000000..15bccd448
--- /dev/null
+++ b/scripts/runqemu-gen-tapdevs
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+# Create a "bank" of tap network devices that can be used by the
+# runqemu script. This script needs to be run as root, and will
+# use the tunctl binary from the build system sysroot. Note: many Linux
+# distros these days still use an older version of tunctl which does not
+# support the group permissions option, hence the need to use the build
+# system provided version.
+#
+# Copyright (C) 2010 Intel Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "Usage: sudo $0 <gid> <num> <native-sysroot-basedir>"
+ echo "Where <gid> is the numeric group id the tap devices will be owned by"
+ echo "<num> is the number of tap devices to create (0 to remove all)"
+ echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
+ exit 1
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script must be run with root privileges"
+ exit
+fi
+
+if [ $# -ne 3 ]; then
+ echo "Error: Incorrect number of arguments"
+ usage
+fi
+
+GID=$1
+COUNT=$2
+SYSROOT=$3
+
+TUNCTL=$SYSROOT/usr/bin/tunctl
+if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
+ echo "Error: $TUNCTL is not an executable"
+ usage
+fi
+
+SCRIPT_DIR=`dirname $0`
+RUNQEMU_IFUP="$SCRIPT_DIR/runqemu-ifup"
+if [ ! -x "$RUNQEMU_IFUP" ]; then
+ echo "Error: Unable to find the runqemu-ifup script in $SCRIPT_DIR"
+ exit 1
+fi
+
+IFCONFIG=`which ifconfig 2> /dev/null`
+if [ -z "$IFCONFIG" ]; then
+ # Is it ever anywhere else?
+ IFCONFIG=/sbin/ifconfig
+fi
+if [ ! -x "$IFCONFIG" ]; then
+ echo "$IFCONFIG cannot be executed"
+ exit 1
+fi
+
+# Ensure we start with a clean slate
+for tap in `$IFCONFIG | grep ^tap | awk '{ print \$1 }'`; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ $TUNCTL -d $tap
+done
+
+echo "Creating $COUNT tap devices for GID $GID..."
+for ((index=0; index < $COUNT; index++)); do
+ echo "Creating tap$index"
+ ifup=`$RUNQEMU_IFUP $GID $SYSROOT 2>&1`
+ if [ $? -ne 0 ]; then
+ echo "Error running tunctl: $ifup"
+ exit 1
+ fi
+done
+
+# The runqemu script will check for this file, and if it exists,
+# will use the existing bank of tap devices without creating
+# additional ones via sudo.
+touch /etc/runqemu-nosudo
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
new file mode 100755
index 000000000..710d2975c
--- /dev/null
+++ b/scripts/runqemu-ifdown
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# QEMU network configuration script to bring down tap devices. This
+# utility needs to be run as root, and will use the tunctl binary
+# from the native sysroot.
+#
+# If you find yourself calling this script a lot, you can add
+# the following to your /etc/sudoers file to be able to run this
+# command without entering your password each time:
+#
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifup
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifdown
+#
+# Copyright (c) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script (runqemu-ifdown) must be run with root privileges"
+ exit 1
+fi
+
+if [ $# -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+TAP=$1
+NATIVE_SYSROOT_DIR=$2
+
+TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+if [ ! -e "$TUNCTL" ]; then
+ echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin'"
+ exit 1
+fi
+
+$TUNCTL -d $TAP
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
new file mode 100755
index 000000000..e4c3dafee
--- /dev/null
+++ b/scripts/runqemu-ifup
@@ -0,0 +1,114 @@
+#!/bin/bash
+#
+# QEMU network interface configuration script. This utility needs to
+# be run as root, and will use the tunctl binary from a native sysroot.
+# Note: many Linux distros these days still use an older version of
+# tunctl which does not support the group permissions option, hence
+# the need to use the build system's version.
+#
+# If you find yourself calling this script a lot, you can add
+# the following to your /etc/sudoers file to be able to run this
+# command without entering your password each time:
+#
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifup
+# <my-username> ALL=NOPASSWD: /path/to/runqemu-ifdown
+#
+# If you'd like to create a bank of tap devices at once, you should use
+# the runqemu-gen-tapdevs script instead. If tap devices are set up using
+# that script, the runqemu script will never end up calling this
+# script.
+#
+# Copyright (c) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+usage() {
+ echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
+}
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script (runqemu-ifup) must be run with root privileges"
+ exit 1
+fi
+
+if [ $# -ne 3 ]; then
+ usage
+ exit 1
+fi
+
+USERID="-u $1"
+GROUP="-g $2"
+NATIVE_SYSROOT_DIR=$3
+
+TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+if [ ! -x "$TUNCTL" ]; then
+ echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin'"
+ exit 1
+fi
+
+TAP=`$TUNCTL -b $GROUP 2>&1`
+STATUS=$?
+if [ $STATUS -ne 0 ]; then
+# If tunctl -g fails, try using tunctl -u, for older host kernels
+# which do not support the TUNSETGROUP ioctl
+ TAP=`$TUNCTL -b $USERID 2>&1`
+ STATUS=$?
+ if [ $STATUS -ne 0 ]; then
+ echo "tunctl failed:"
+ exit 1
+ fi
+fi
+
+IFCONFIG=`which ifconfig 2> /dev/null`
+if [ "x$IFCONFIG" = "x" ]; then
+ # better than nothing...
+ IFCONFIG=/sbin/ifconfig
+fi
+if [ ! -x "$IFCONFIG" ]; then
+ echo "$IFCONFIG cannot be executed"
+ exit 1
+fi
+
+ROUTE=`which route`
+if [ "x$ROUTE" = "x" ]; then
+ # better than nothing...
+ ROUTE=/sbin/route
+fi
+if [ ! -x "$ROUTE" ]; then
+ echo "$ROUTE cannot be executed"
+ exit 1
+fi
+
+IPTABLES=`which iptables 2> /dev/null`
+if [ "x$IPTABLES" = "x" ]; then
+ IPTABLES=/sbin/iptables
+fi
+if [ ! -x "$IPTABLES" ]; then
+ echo "$IPTABLES cannot be executed"
+ exit 1
+fi
+
+n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
+$IFCONFIG $TAP 192.168.7.$n netmask 255.255.255.255
+
+dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+$ROUTE add -host 192.168.7.$dest $TAP
+
+# setup NAT for tap0 interface to have internet access in QEMU
+$IPTABLES -A POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
+$IPTABLES -A POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
+echo 1 > /proc/sys/net/ipv4/ip_forward
+$IPTABLES -P FORWARD ACCEPT
+
+echo $TAP
diff --git a/scripts/runqemu-internal b/scripts/runqemu-internal
new file mode 100755
index 000000000..fe2974ba4
--- /dev/null
+++ b/scripts/runqemu-internal
@@ -0,0 +1,505 @@
+#!/bin/bash -x
+
+# Handle running OE images under qemu
+#
+# Copyright (C) 2006-2011 Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+# Call setting:
+# QEMU_MEMORY (optional) - set the amount of memory in the emulated system.
+# SERIAL_LOGFILE (optional) - log the serial port output to a file
+# CROSSPATH - the path to any cross toolchain to use with distcc
+#
+# Image options:
+# MACHINE - the machine to run
+# FSTYPE - the image type to run
+# KERNEL - the kernel image file to use
+# ROOTFS - the disk image file to use
+#
+
+
+mem_size=-1
+
+#Get rid of <> and get the contents of extra qemu running params
+SCRIPT_QEMU_EXTRA_OPT=`echo $SCRIPT_QEMU_EXTRA_OPT | sed -e 's/<//' -e 's/>//'`
+#if user set qemu memory, eg: -m 256 in qemu extra params, we need to do some
+# validation check
+mem_set=`expr "$SCRIPT_QEMU_EXTRA_OPT" : '.*\(-m[[:space:]] *[0-9]*\)'`
+if [ ! -z "$mem_set" ] ; then
+#Get memory setting size from user input
+ mem_size=`echo $mem_set | sed 's/-m[[:space:]] *//'`
+else
+ case "$MACHINE" in
+ "qemux86")
+ mem_size=128
+ ;;
+ "qemux86-64")
+ mem_size=128
+ ;;
+ "qemuarm")
+ mem_size=128
+ ;;
+ "qemumips")
+ mem_size=128
+ ;;
+ "qemuppc")
+ mem_size=128
+ ;;
+ *)
+ mem_size=64
+ ;;
+ esac
+
+fi
+
+# QEMU_MEMORY has 'M' appended to mem_size
+QEMU_MEMORY="$mem_size"M
+
+# Bug 433: qemuarm cannot use > 256 MB RAM
+if [ "$MACHINE" = "qemuarm" ]; then
+ if [ -z "$mem_size" -o $mem_size -gt 256 ]; then
+ echo "WARNING: qemuarm does not support > 256M of RAM."
+ echo "Changing QEMU_MEMORY to default of 256M."
+ QEMU_MEMORY="256M"
+ mem_size="256"
+ SCRIPT_QEMU_EXTRA_OPT=`echo $SCRIPT_QEMU_EXTRA_OPT | sed -e "s/$mem_set/-m 256/" `
+ fi
+fi
+
+# We need to specify -m <mem_size> to overcome a bug in qemu 0.14.0
+# https://bugs.launchpad.net/ubuntu/+source/qemu-kvm/+bug/584480
+
+if [ -z "$mem_set" ] ; then
+ SCRIPT_QEMU_EXTRA_OPT="$SCRIPT_QEMU_EXTRA_OPT -m $mem_size"
+fi
+# This file is created when runqemu-gen-tapdevs creates a bank of tap
+# devices, indicating that the user should not bring up new ones using
+# sudo.
+NOSUDO_FLAG="/etc/runqemu-nosudo"
+
+QEMUIFUP=`which runqemu-ifup 2> /dev/null`
+QEMUIFDOWN=`which runqemu-ifdown 2> /dev/null`
+if [ -z "$QEMUIFUP" -o ! -x "$QEMUIFUP" ]; then
+ echo "runqemu-ifup cannot be found or executed"
+ exit 1
+fi
+if [ -z "$QEMUIFDOWN" -o ! -x "$QEMUIFDOWN" ]; then
+ echo "runqemu-ifdown cannot be found or executed"
+ exit 1
+fi
+
+NFSRUNNING="false"
+
+acquire_lock() {
+ lockfile=$1
+ if [ -z "$lockfile" ]; then
+ echo "Error: missing lockfile arg passed to acquire_lock()"
+ return 1
+ fi
+
+ if [ -e "$lockfile.lock" ]; then
+ # Check that the lockfile is not stale
+ ps=`ps -eo pid | grep $(cat $lockfile.lock)`
+ if [ -z "$ps" ]; then
+ echo "WARNING: Stale lock file detected, deleting $lockfile.lock."
+ rm -f $lockfile.lock
+ echo $$ > $lockfile.lock
+ else
+ return 1
+ fi
+ else
+ echo $$ > $lockfile.lock
+ fi
+
+ return 0
+}
+
+release_lock() {
+ lockfile=$1
+ if [ -z "$lockfile" ]; then
+ echo "Error: missing lockfile arg passed to release_lock()"
+ return 1
+ fi
+
+ rm -f $lockfile.lock
+}
+
+LOCKDIR="/tmp/qemu-tap-locks"
+if [ ! -d "$LOCKDIR" ]; then
+ mkdir $LOCKDIR
+ chmod 777 $LOCKDIR
+fi
+
+IFCONFIG=`which ifconfig 2> /dev/null`
+if [ -z "$IFCONFIG" ]; then
+ IFCONFIG=/sbin/ifconfig
+fi
+if [ ! -x "$IFCONFIG" ]; then
+ echo "$IFCONFIG cannot be executed"
+ exit 1
+fi
+
+POSSIBLE=`$IFCONFIG -a | grep '^tap' | awk '{print $1}'`
+TAP=""
+LOCKFILE=""
+for tap in $POSSIBLE; do
+ LOCKFILE="$LOCKDIR/$tap"
+ echo "Acquiring lockfile for $tap..."
+ acquire_lock $LOCKFILE
+ if [ $? -eq 0 ]; then
+ TAP=$tap
+ break
+ fi
+done
+
+if [ "$TAP" = "" ]; then
+ if [ -e "$NOSUDO_FLAG" ]; then
+ echo "Error: There are no available tap devices to use for networking,"
+ echo "and I see $NOSUDO_FLAG exists, so I am not going to try creating"
+ echo "a new one with sudo."
+ exit 1
+ fi
+
+ GROUPID=`id -g`
+ USERID=`id -u`
+ echo "Setting up tap interface under sudo"
+ # Redirect stderr since we could see a LD_PRELOAD warning here if pseudo is loaded
+ # but inactive. This looks scary but is harmless
+ tap=`sudo $QEMUIFUP $USERID $GROUPID $OECORE_NATIVE_SYSROOT 2> /dev/null`
+ if [ $? -ne 0 ]; then
+ # Re-run standalone to see verbose errors
+ sudo $QEMUIFUP $USERID $GROUPID $OECORE_NATIVE_SYSROOT
+ return
+ fi
+ LOCKFILE="$LOCKDIR/$tap"
+ echo "Acquiring lockfile for $tap..."
+ acquire_lock $LOCKFILE
+ if [ $? -eq 0 ]; then
+ TAP=$tap
+ fi
+else
+ echo "Using preconfigured tap device '$TAP'"
+fi
+
+cleanup() {
+ if [ ! -e "$NOSUDO_FLAG" ]; then
+ # Redirect stderr since we could see a LD_PRELOAD warning here if pseudo is loaded
+ # but inactive. This looks scary but is harmless
+ sudo $QEMUIFDOWN $TAP $OECORE_NATIVE_SYSROOT 2> /dev/null
+ fi
+ echo "Releasing lockfile of preconfigured tap device '$TAP'"
+ release_lock $LOCKFILE
+
+ if [ "$NFSRUNNING" = "true" ]; then
+ echo "Shutting down the userspace NFS server..."
+ echo "runqemu-export-rootfs stop $ROOTFS"
+ runqemu-export-rootfs stop $ROOTFS
+ fi
+ # If QEMU crashes or somehow tty properties are not restored
+ # after qemu exits, we need to run stty sane
+ stty sane
+}
+
+n0=$(echo $TAP | sed 's/tap//')
+n1=$(($n0 * 2 + 1))
+n2=$(($n1 + 1))
+
+KERNEL_NETWORK_CMD="ip=192.168.7.$n2::192.168.7.$n1:255.255.255.0"
+QEMU_TAP_CMD="-net tap,vlan=0,ifname=$TAP,script=no,downscript=no"
+QEMU_NETWORK_CMD="-net nic,vlan=0 $QEMU_TAP_CMD"
+KERNCMDLINE="mem=$QEMU_MEMORY"
+QEMU_UI_OPTIONS="-show-cursor -usb -usbdevice wacom-tablet"
+
+NFS_INSTANCE=`echo $TAP | sed 's/tap//'`
+export NFS_INSTANCE
+
+SERIALOPTS=""
+if [ "x$SERIAL_LOGFILE" != "x" ]; then
+ SERIALOPTS="-serial file:$SERIAL_LOGFILE"
+fi
+
+case "$MACHINE" in
+ "qemuarm") ;;
+ "qemumips") ;;
+ "qemuppc") ;;
+ "qemuarmv6") ;;
+ "qemuarmv7") ;;
+ "qemux86") ;;
+ "qemux86-64") ;;
+ "akita") ;;
+ "spitz") ;;
+ *)
+ echo "Error: Unsupported machine type $MACHINE"
+ return
+ ;;
+esac
+
+if [ ! -f "$KERNEL" ]; then
+ echo "Error: Kernel image file $KERNEL doesn't exist"
+ cleanup
+ return
+fi
+
+if [ "$FSTYPE" != "nfs" -a ! -f "$ROOTFS" ]; then
+ echo "Error: Image file $ROOTFS doesn't exist"
+ cleanup
+ return
+fi
+
+if [ "$FSTYPE" = "nfs" ]; then
+ NFS_SERVER="192.168.7.1"
+ NFS_DIR=`echo $ROOTFS | sed 's/^[^:]*:\(.*\)/\1/'`
+ MOUNTD_RPCPORT=$[ 21111 + $NFS_INSTANCE ]
+ NFSD_RPCPORT=$[ 11111 + $NFS_INSTANCE ]
+ NFSD_PORT=$[ 3049 + $NFS_INSTANCE ]
+ MOUNTD_PORT=$[ 3048 + $NFS_INSTANCE ]
+ UNFS_OPTS="nfsvers=2,mountprog=$MOUNTD_RPCPORT,nfsprog=$NFSD_RPCPORT,udp,port=$NFSD_PORT,mountport=$MOUNTD_PORT"
+
+ PSEUDO_LOCALSTATEDIR=~/.runqemu-sdk/pseudo
+ export PSEUDO_LOCALSTATEDIR
+
+ # Start the userspace NFS server
+ echo "runqemu-export-rootfs restart $ROOTFS"
+ runqemu-export-rootfs restart $ROOTFS
+ if [ $? != 0 ]; then
+ cleanup
+ return
+ fi
+ NFSRUNNING="true"
+fi
+
+if [ "$NFS_SERVER" = "" ]; then
+ NFS_SERVER="192.168.7.1"
+ NFS_DIR=$ROOTFS
+fi
+
+if [ "$MACHINE" = "qemuarm" -o "$MACHINE" = "qemuarmv6" -o "$MACHINE" = "qemuarmv7" ]; then
+ QEMU=qemu-system-arm
+ MACHINE_SUBTYPE=versatilepb
+ QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
+ # QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS -force-pointer"
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ KERNCMDLINE="root=/dev/sda rw console=ttyAMA0,115200 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY highres=off"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M versatilepb -hda $ROOTFS -no-reboot $QEMU_UI_OPTIONS"
+ fi
+ if [ "$FSTYPE" = "nfs" ]; then
+ if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
+ echo "Error: NFS mount point $ROOTFS doesn't exist"
+ cleanup
+ return
+ fi
+ KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M versatilepb --no-reboot $QEMU_UI_OPTIONS"
+ fi
+ if [ "$MACHINE" = "qemuarmv6" ]; then
+ QEMUOPTIONS="$QEMUOPTIONS -cpu arm1136"
+ fi
+ if [ "$MACHINE" = "qemuarmv7" ]; then
+ QEMUOPTIONS="$QEMUOPTIONS -cpu cortex-a8"
+ fi
+fi
+
+if [ "$MACHINE" = "qemux86" ]; then
+ QEMU=qemu
+ QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS -vga vmware -enable-gl"
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ KERNCMDLINE="vga=0 root=/dev/hda rw mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -hda $ROOTFS $QEMU_UI_OPTIONS"
+ fi
+ if [ "$FSTYPE" = "nfs" ]; then
+ if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
+ echo "Error: NFS mount point $ROOTFS doesn't exist."
+ cleanup
+ return
+ fi
+ KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD $QEMU_UI_OPTIONS"
+ fi
+ # Currently oprofile's event based interrupt mode doesn't work(Bug #828) in
+ # qemux86 and qemux86-64. We can use timer interrupt mode for now.
+ KERNCMDLINE="$KERNCMDLINE oprofile.timer=1"
+fi
+
+if [ "$MACHINE" = "qemux86-64" ]; then
+ QEMU=qemu-system-x86_64
+ QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS -vga vmware -enable-gl"
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ KERNCMDLINE="vga=0 root=/dev/hda rw mem=$QEMU_MEMORY $KERNEL_NETWORK_CMD"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -hda $ROOTFS $QEMU_UI_OPTIONS"
+ fi
+ if [ "$FSTYPE" = "nfs" ]; then
+ if [ "x$ROOTFS" = "x" ]; then
+ ROOTFS=/srv/nfs/qemux86-64
+ fi
+ if [ ! -d "$ROOTFS" ]; then
+ echo "Error: NFS mount point $ROOTFS doesn't exist."
+ cleanup
+ return
+ fi
+ KERNCMDLINE="root=/dev/nfs nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD $QEMU_UI_OPTIONS"
+ fi
+ # Currently oprofile's event based interrupt mode doesn't work(Bug #828) in
+ # qemux86 and qemux86-64. We can use timer interrupt mode for now.
+ KERNCMDLINE="$KERNCMDLINE oprofile.timer=1"
+fi
+
+if [ "$MACHINE" = "spitz" ]; then
+ QEMU=qemu-system-arm
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ echo $ROOTFS
+ ROOTFS=`readlink -f $ROOTFS`
+ echo $ROOTFS
+ if [ ! -e "$ROOTFS.qemudisk" ]; then
+ echo "Adding a partition table to the ext3 image for use by QEMU, please wait..."
+ runqemu-addptable2image $ROOTFS $ROOTFS.qemudisk
+ fi
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M spitz -hda $ROOTFS.qemudisk -portrait"
+ fi
+fi
+
+if [ "$MACHINE" = "qemumips" ]; then
+ QEMU=qemu-system-mips
+ MACHINE_SUBTYPE=malta
+ QEMU_UI_OPTIONS="-vga cirrus $QEMU_UI_OPTIONS"
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ #KERNCMDLINE="root=/dev/hda console=ttyS0 console=tty0 $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ KERNCMDLINE="root=/dev/hda rw console=ttyS0 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE -hda $ROOTFS -no-reboot $QEMU_UI_OPTIONS"
+ fi
+ if [ "$FSTYPE" = "nfs" ]; then
+ if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
+ echo "Error: NFS mount point $ROOTFS doesn't exist"
+ cleanup
+ return
+ fi
+ KERNCMDLINE="root=/dev/nfs console=ttyS0 console=tty nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M $MACHINE_SUBTYPE -no-reboot $QEMU_UI_OPTIONS"
+ fi
+fi
+
+if [ "$MACHINE" = "qemuppc" ]; then
+ QEMU=qemu-system-ppc
+ MACHINE_SUBTYPE=mac99
+ CPU_SUBTYPE=G4
+ QEMU_UI_OPTIONS="$QEMU_UI_OPTIONS"
+ QEMU_NETWORK_CMD="-net nic,model=pcnet $QEMU_TAP_CMD"
+ if [ "$FSTYPE" = "ext3" -o "$FSTYPE" = "btrfs" ]; then
+ KERNCMDLINE="root=/dev/hda rw console=ttyS0 console=tty $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE -M $MACHINE_SUBTYPE -hda $ROOTFS -no-reboot $QEMU_UI_OPTIONS"
+ fi
+ if [ "$FSTYPE" = "nfs" ]; then
+ if [ "$NFS_SERVER" = "192.168.7.1" -a ! -d "$NFS_DIR" ]; then
+ echo "Error: NFS mount point $ROOTFS doesn't exist"
+ cleanup
+ return
+ fi
+ KERNCMDLINE="root=/dev/nfs console=ttyS0 console=tty nfsroot=$NFS_SERVER:$NFS_DIR,$UNFS_OPTS rw $KERNEL_NETWORK_CMD mem=$QEMU_MEMORY"
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -cpu $CPU_SUBTYPE -M $MACHINE_SUBTYPE -no-reboot $QEMU_UI_OPTIONS"
+ fi
+fi
+
+if [ "$MACHINE" = "akita" ]; then
+ QEMU=qemu-system-arm
+ if [ "$FSTYPE" = "jffs2" ]; then
+ ROOTFS=`readlink -f $ROOTFS`
+ if [ ! -e "$ROOTFS.qemuflash" ]; then
+ echo "Converting raw image into flash image format for use by QEMU, please wait..."
+ raw2flash.akita < $ROOTFS > $ROOTFS.qemuflash
+ fi
+ QEMUOPTIONS="$QEMU_NETWORK_CMD -M akita -mtdblock $ROOTFS.qemuflash -portrait"
+ fi
+fi
+
+if [ "x$QEMUOPTIONS" = "x" ]; then
+ echo "Error: Unable to support this combination of options"
+ cleanup
+ return
+fi
+
+PATH=$CROSSPATH:$OECORE_NATIVE_SYSROOT/usr/bin:$PATH
+
+QEMUBIN=`which $QEMU 2> /dev/null`
+if [ ! -x "$QEMUBIN" ]; then
+ echo "Error: No QEMU binary '$QEMU' could be found."
+ cleanup
+ return
+fi
+
+do_quit() {
+ if [ -n "$PIDFILE" ]; then
+ #echo kill `cat $PIDFILE`
+ kill `cat $PIDFILE`
+ fi
+ cleanup
+ return
+}
+
+DISTCCD=`which distccd 2> /dev/null`
+PIDFILE=""
+
+trap do_quit INT TERM QUIT
+
+if [ -x "$DISTCCD" ]; then
+ echo "Starting distccd..."
+ PIDFILE=`mktemp`
+ $DISTCCD --allow 192.168.7.2 --daemon --pid-file $PIDFILE &
+else
+ echo "WARNING: distccd not present, no distcc support loaded."
+fi
+
+# qemu got segfault if linked with nVidia's libgl
+GL_LD_PRELOAD=$LD_PRELOAD
+
+if ldd $QEMUBIN | grep -i nvidia &> /dev/null
+then
+cat << EOM
+WARNING: nVidia proprietary OpenGL libraries detected.
+nVidia's OpenGL libraries are known to have compatibility issues with qemu,
+resulting in a segfault. Please uninstall these drivers or ensure the mesa libGL
+libraries precede nvidia's via LD_PRELOAD(Already do it on Ubuntu 10).
+EOM
+
+# Automatically use Ubuntu system's mesa libGL, other distro can add its own path
+if grep -i ubuntu /etc/lsb-release &> /dev/null
+then
+ # precede nvidia's driver on Ubuntu 10
+ UBUNTU_MAIN_VERSION=`cat /etc/lsb-release |grep DISTRIB_RELEASE |cut -d= -f 2| cut -d. -f 1`
+ if [ "$UBUNTU_MAIN_VERSION" = "10" ];
+ then
+ GL_PATH=""
+ if test -e /usr/lib/libGL.so
+ then
+ GL_PATH="/usr/lib/libGL.so"
+ elif test -e /usr/lib/x86_64-linux-gnu/libGL.so
+ then
+ GL_PATH="/usr/lib/x86_64-linux-gnu/libGL.so"
+ fi
+
+ echo "Skip nVidia's libGL on Ubuntu 10!"
+ GL_LD_PRELOAD="$GL_PATH $LD_PRELOAD"
+ fi
+fi
+fi
+
+echo "Running $QEMU..."
+# -no-reboot is a mandatory option - see bug #100
+echo $QEMUBIN -kernel $KERNEL $QEMUOPTIONS $SERIALOPTS -no-reboot $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT --append '"'$KERNCMDLINE $SCRIPT_KERNEL_OPT'"'
+LD_PRELOAD="$GL_LD_PRELOAD" $QEMUBIN -kernel $KERNEL $QEMUOPTIONS $SERIALOPTS -no-reboot $SCRIPT_QEMU_OPT $SCRIPT_QEMU_EXTRA_OPT --append "$KERNCMDLINE $SCRIPT_KERNEL_OPT"
+
+
+cleanup
+
+trap - INT TERM QUIT
diff --git a/scripts/runqemu.README b/scripts/runqemu.README
new file mode 100644
index 000000000..5908d831a
--- /dev/null
+++ b/scripts/runqemu.README
@@ -0,0 +1,42 @@
+Using OE images with QEMU
+=========================
+
+OE-Core can generate qemu bootable kernels and images which can be used
+on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC
+and x86 (32 and 64 bit) images. The scripts can be used within the OE build
+system or externally.
+
+The runqemu script is run as:
+
+ runqemu <machine> <zimage> <filesystem>
+
+where:
+
+ <machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64)
+ <zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
+ <filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory
+
+If <machine> isn't specified, the script will try to detect the machine name
+from the name of the <zimage> file.
+
+If <filesystem> isn't specified, nfs booting will be assumed.
+
+When used within the build system, it will default to qemuarm, ext2 and the last kernel and
+core-image-sato-sdk image built by the build system. If an sdk image isn't present it will look
+for sato and minimal images.
+
+Full usage instructions can be seen by running the command with no options specified.
+
+
+Notes
+=====
+
+ - The scripts run qemu using sudo. Change perms on /dev/net/tun to
+ run as non root. The runqemu-gen-tapdevs script can also be used by
+ root to prepopulate the appropriate network devices.
+ - You can access the host computer at 192.168.7.1 within the image.
+ - Your qemu system will be accessible as 192.168.7.2.
+ - The script extracts the root filesystem specified under pseudo and sets up a userspace
+ NFS server to share the image over by default meaning the filesystem can be accessed by
+ both the host and guest systems.
+
diff --git a/scripts/send-pull-request b/scripts/send-pull-request
new file mode 100755
index 000000000..09f42458c
--- /dev/null
+++ b/scripts/send-pull-request
@@ -0,0 +1,173 @@
+#!/bin/bash
+#
+# Copyright (c) 2010-2011, Intel Corporation.
+# All Rights Reserved
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+#
+# This script is intended to be used to send a patch series prepared by the
+# create-pull-request script to Open Embedded and The Yocto Project, as well
+# as to related projects and layers.
+#
+
+AUTO=0
+AUTO_CL=0
+GITSOBCC="--suppress-cc=all"
+
+# Prevent environment leakage to these vars.
+unset TO
+unset CC
+unset AUTO_CC
+
+usage()
+{
+cat <<EOM
+Usage: $(basename $0) [-h] [-a] [-c] [[-t email]...] -p pull-dir
+ -a Send the cover letter to every recipient listed in Cc and
+ Signed-off-by lines found in the cover letter and the patches.
+ This option implies -c.
+ -c Expand the Cc list for the individual patches using the Cc and
+ Signed-off-by lines from the same patch.
+ -p pull-dir Directory containing summary and patch files
+ -t email Explicitly add email to the recipients
+EOM
+}
+
+# Collect addresses from a patch into AUTO_CC
+# $1: a patch file
+harvest_recipients()
+{
+ PATCH=$1
+ export IFS=$',\n'
+ for REGX in "^[Cc][Cc]: *" "^[Ss]igned-[Oo]ff-[Bb]y: *"; do
+ for EMAIL in $(sed '/^---$/q' $PATCH | grep -e "$REGX" | sed "s/$REGX//"); do
+ if [ "${AUTO_CC/$EMAIL/}" == "$AUTO_CC" ] && [ -n "$EMAIL" ]; then
+ if [ -z "$AUTO_CC" ]; then
+ AUTO_CC=$EMAIL;
+ else
+ AUTO_CC="$AUTO_CC,$EMAIL";
+ fi
+ fi
+ done
+ done
+ unset IFS
+}
+
+# Parse and verify arguments
+while getopts "achp:t:" OPT; do
+ case $OPT in
+ a)
+ AUTO_CL=1
+ # Fall through to include -c
+ ;;
+ c)
+ AUTO=1
+ GITSOBCC="--signed-off-by-cc"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ p)
+ PDIR=${OPTARG%/}
+ if [ ! -d $PDIR ]; then
+ echo "ERROR: pull-dir \"$PDIR\" does not exist."
+ usage
+ exit 1
+ fi
+ ;;
+ t)
+ if [ -n "$TO" ]; then
+ TO="$TO,$OPTARG"
+ else
+ TO="$OPTARG"
+ fi
+ ;;
+ esac
+done
+
+if [ -z "$PDIR" ]; then
+ echo "ERROR: you must specify a pull-dir."
+ usage
+ exit 1
+fi
+
+
+# Verify the cover letter is complete and free of tokens
+if [ -e $PDIR/0000-cover-letter.patch ]; then
+ CL="$PDIR/0000-cover-letter.patch"
+ for TOKEN in SUBJECT BLURB; do
+ grep -q "*** $TOKEN HERE ***" "$CL"
+ if [ $? -eq 0 ]; then
+ echo "ERROR: Please edit $CL and try again (Look for '*** $TOKEN HERE ***')."
+ exit 1
+ fi
+ done
+else
+ echo "WARNING: No cover letter will be sent."
+fi
+
+# Harvest emails from the generated patches and populate AUTO_CC.
+if [ $AUTO_CL -eq 1 ]; then
+ for PATCH in $PDIR/*.patch; do
+ harvest_recipients $PATCH
+ done
+fi
+
+AUTO_TO="$(git config sendemail.to)"
+if [ -n "$AUTO_TO" ]; then
+ if [ -n "$TO" ]; then
+ TO="$TO,$AUTO_TO"
+ else
+ TO="$AUTO_TO"
+ fi
+fi
+
+if [ -z "$TO" ] && [ -z "$AUTO_CC" ]; then
+ echo "ERROR: you have not specified any recipients."
+ usage
+ exit 1
+fi
+
+
+# Convert the collected addresses into git-send-email argument strings
+export IFS=$','
+GIT_TO=$(for R in $TO; do echo -n "--to='$R' "; done)
+GIT_CC=$(for R in $AUTO_CC; do echo -n "--cc='$R' "; done)
+unset IFS
+
+
+# Handoff to git-send-email. It will perform the send confirmation.
+PATCHES=$(echo $PDIR/*.patch)
+if [ $AUTO_CL -eq 1 ]; then
+ # Send the cover letter to every recipient, both specified as well as
+ # harvested. Then remove it from the patches list.
+ eval "git send-email $GIT_TO $GIT_CC --confirm=always --no-chain-reply-to --suppress-cc=all $CL"
+ if [ $? -eq 1 ]; then
+ echo "ERROR: failed to send cover-letter with automatic recipients."
+ exit 1
+ fi
+ PATCHES=${PATCHES/"$CL"/}
+fi
+
+# Send the patch to the specified recipients and, if -c was specified, those git
+# finds in this specific patch.
+eval "git send-email $GIT_TO --confirm=always --no-chain-reply-to $GITSOBCC $PATCHES"
+if [ $? -eq 1 ]; then
+ echo "ERROR: failed to send patches."
+ exit 1
+fi
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
new file mode 100755
index 000000000..be185bbb4
--- /dev/null
+++ b/scripts/sstate-cache-management.sh
@@ -0,0 +1,326 @@
+#!/bin/bash
+
+# Copyright (c) 2012 Wind River Systems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+# Global vars
+cache_dir=
+confirm=
+total_deleted=0
+verbose=
+
+# Print the help text for this script to stdout.
+usage () {
+ cat << EOF
+Welcome to sstate cache management utilities.
+sstate-cache-management.sh <OPTION>
+
+Options:
+ -h, --help
+ Display this help and exit.
+
+ --cache-dir=<sstate cache dir>
+ Specify sstate cache directory, will use the environment
+ variable SSTATE_CACHE_DIR if it is not specified.
+
+ --extra-layer=<layer1>,<layer2>...<layern>
+ Specify the layer which will be used for searching the archs,
+ it will search the meta and meta-* layers in the top dir by
+ default, and will search meta, meta-*, <layer1>, <layer2>,
+ ...<layern> when specified. Use "," as the separator.
+
+ This is useless for --stamps-dir.
+
+ -d, --remove-duplicated
+ Remove the duplicated sstate cache files of one package, only
+ the newest one will be kept.
+
+ Conflicts with --stamps-dir.
+
+ --stamps-dir=<dir1>,<dir2>...<dirn>
+ Specify the build directory's stamps directories, the sstate
+ cache file which IS USED by these build directories will be KEPT,
+ other sstate cache files in cache-dir will be removed. Use ","
+ as the separator. For example:
+ --stamps-dir=build1/tmp/stamps,build2/tmp/stamps
+
+ Conflicts with --remove-duplicated.
+
+ -y, --yes
+ Automatic yes to prompts; assume "yes" as answer to all prompts
+ and run non-interactively.
+
+ -v, --verbose
+ explain what is being done
+
+EOF
+}
+
+# No arguments given: print usage and exit successfully.
+if [ $# -lt 1 ]; then
+ usage
+ exit 0
+fi
+
+# Tell the user that no cache files matched for removal.
+no_files () {
+ echo No files to remove
+}
+
+# Tell the user the operation was aborted and nothing was changed.
+do_nothing () {
+ echo Nothing to do
+}
+
+# Prompt the user to confirm removal unless -y was given; answer left in $confirm
+read_confirm () {
+ echo -n "$total_deleted files will be removed! "
+ if [ "$confirm" != "y" ]; then
+ echo -n "Do you want to continue (y/n)? "
+ while read confirm; do
+ [ "$confirm" = "Y" -o "$confirm" = "y" -o "$confirm" = "n" \
+ -o "$confirm" = "N" ] && break
+ echo -n "Invalid input \"$confirm\", please input 'y' or 'n': "
+ done
+ else
+ echo
+ fi
+}
+
+# Print an error message to stderr and exit with status 1.
+echo_error () {
+ echo "ERROR: $1" >&2
+ exit 1
+}
+
+# Remove duplicated sstate cache files for each package; keep only the newest
+remove_duplicated () {
+
+ local topdir
+ local tunedirs
+ local all_archs
+ local ava_archs
+ local arch
+ local file_names
+ local sstate_list
+ local fn_tmp
+ local list_suffix=`mktemp` || exit 1
+
+ # Find out the archs in all the layers (meta, meta-* and any --extra-layer)
+ echo -n "Figuring out the archs in the layers ... "
+ topdir=$(dirname $(dirname $(readlink -e $0)))
+ tunedirs="`find $topdir/meta* $layers -path '*/meta*/conf/machine/include'`"
+ [ -n "$tunedirs" ] || echo_error "Can't find the tune directory"
+ all_archs=`grep -r -h "^AVAILTUNES " $tunedirs | sed -e 's/.*=//' -e 's/\"//g'`
+ # Add the qemu and native archs
+ # Use the "_" to substitute "-", e.g., x86-64 to x86_64
+ # Sort to remove the duplicated ones
+ all_archs=$(echo $all_archs qemuarm qemux86 qemumips qemuppc qemux86_64 $(uname -m) \
+ | sed -e 's/-/_/g' -e 's/ /\n/g' | sort -u)
+ echo "Done"
+
+ sstate_suffixes="deploy-rpm deploy-ipk deploy-deb deploy package populate-lic populate-sysroot"
+
+ cd $cache_dir || exit 1
+ # Save all the sstate file names in a temporary list file
+ sstate_list=`mktemp` || exit 1
+ ls sstate-*.tgz >$sstate_list
+ echo -n "Figuring out the archs in the sstate cache dir ... "
+ for arch in $all_archs; do
+ grep -q -w $arch $sstate_list
+ [ $? -eq 0 ] && ava_archs="$ava_archs $arch"
+ done
+ echo "Done"
+ echo "The following archs have been found in the cache dir:"
+ echo $ava_archs
+ echo ""
+
+ # Directory holding one list file per suffix of files to be removed
+ local remove_listdir=`mktemp -d` || exit 1
+
+ for suffix in $sstate_suffixes; do
+ # Save the file list to a file, some suffix's file may not exist
+ ls *_$suffix.tgz >$list_suffix 2>/dev/null
+ local deleted=0
+ echo -n "Figuring out the sstate-xxx_$suffix.tgz ... "
+ # There are at least 6 dashes (-) after the arch, use this to avoid the
+ # greedy match of sed.
+ file_names=`for arch in $ava_archs; do
+ sed -ne 's/^\(sstate-.*\)-'"$arch"'-.*-.*-.*-.*-.*-.*/\1/p' $list_suffix
+ done | sort -u`
+
+ fn_tmp=`mktemp` || exit 1
+ for fn in $file_names; do
+ [ -z "$verbose" ] || echo "Analyzing $fn-xxx_$suffix.tgz"
+ for arch in $ava_archs; do
+ grep -h "^$fn-$arch-" $list_suffix >>$fn_tmp
+ done
+ # Keep the newest by access time (ls -u); delete the rest and their .siginfo files
+ to_del=$(ls -u $(cat $fn_tmp) | sed -n '1!p' | sed -e 'p' -e 's/$/.siginfo/')
+ echo $to_del >>$remove_listdir/sstate-xxx_$suffix
+ let deleted=$deleted+`echo $to_del | wc -w`
+ rm -f $fn_tmp
+ done
+ echo "($deleted files will be removed)"
+ let total_deleted=$total_deleted+$deleted
+ done
+ rm -f $list_suffix
+ if [ $total_deleted -gt 0 ]; then
+ read_confirm
+ if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
+ for list in `ls $remove_listdir/`; do
+ echo -n "Removing $list.tgz (`cat $remove_listdir/$list | wc -w` files) ... "
+ rm -f $verbose `cat $remove_listdir/$list`
+ echo "Done"
+ done
+ echo "$total_deleted files have been removed!"
+ else
+ do_nothing
+ fi
+ else
+ no_files
+ fi
+ [ -d $remove_listdir ] && rm -fr $remove_listdir
+}
+
+# Remove sstate cache files not referenced by the given stamps directories;
+# files whose checksum appears in a sigdata stamp are kept.
+rm_by_stamps (){
+
+ local cache_list=`mktemp` || exit 1
+ local keep_list=`mktemp` || exit 1
+ local mv_to_dir=`mktemp -d -p $cache_dir` || exit 1
+ local suffixes
+ local sums
+ local all_sums
+
+ suffixes="populate_sysroot populate_lic package_write_ipk \
+ package_write_rpm package_write_deb package deploy"
+
+ # Collect all the checksums recorded in the stamps dirs' sigdata file names.
+ echo -n "Figuring out all the md5sums in stamps dir ... "
+ for i in $suffixes; do
+ sums=`find $stamps -maxdepth 2 -name "*\.do_$i\.sigdata.*" | \
+ sed 's#.*\.sigdata\.##' | sort -u`
+ all_sums="$all_sums $sums"
+ done
+ echo "Done"
+
+ # Save the full sstate cache file list to a file
+ ls $cache_dir/sstate-*.tgz >$cache_list
+
+ echo -n "Figuring out the files which will be removed ... "
+ for i in $all_sums; do
+ grep ".*-$i.*" $cache_list >>$keep_list
+ done
+ echo "Done"
+
+ if [ -s $keep_list ]; then
+ let total_deleted=(`cat $cache_list | wc -w` - `cat $keep_list | wc -l`)*2
+
+ if [ $total_deleted -gt 0 ]; then
+ read_confirm
+ if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
+ echo "Removing sstate cache files ... ($total_deleted files)"
+ # Move the files which need to be kept aside, remove the others,
+ # then move them back
+ for i in `cat $keep_list`; do
+ mv $i $mv_to_dir
+ mv $i.siginfo $mv_to_dir || true
+ done
+ rm -f $verbose $cache_dir/sstate-*.tgz
+ rm -f $verbose $cache_dir/sstate-*.tgz.siginfo
+ mv $mv_to_dir/* $cache_dir/
+ echo "$total_deleted files have been removed"
+ else
+ do_nothing
+ fi
+ else
+ no_files
+ fi
+ else
+ echo_error "All files in cache dir will be removed! Abort!"
+ fi
+
+ rm -f $cache_list
+ rm -f $keep_list
+ rmdir $mv_to_dir
+}
+
+# Parse the command line options; invalid input aborts via echo_error.
+while [ -n "$1" ]; do
+ case $1 in
+ --cache-dir=*)
+ cache_dir=`echo $1 | sed -e 's#^--cache-dir=##' | xargs readlink -e`
+ [ -d "$cache_dir" ] || echo_error "Invalid argument to --cache-dir"
+ shift
+ ;;
+ --remove-duplicated|-d)
+ rm_duplicated="y"
+ shift
+ ;;
+ --yes|-y)
+ confirm="y"
+ shift
+ ;;
+ --extra-layer=*)
+ extra_layers=`echo $1 | sed -e 's#^--extra-layer=##' -e 's#,# #g'`
+ [ -n "$extra_layers" ] || echo_error "Invalid extra layer $i"
+ for i in $extra_layers; do
+ l=`readlink -e $i`
+ if [ -d "$l" ]; then
+ layers="$layers $l"
+ else
+ echo_error "Can't find layer $i"
+ fi
+ done
+ shift
+ ;;
+ --stamps-dir=*)
+ stamps=`echo $1 | sed -e 's#^--stamps-dir=##' -e 's#,# #g'`
+ [ -n "$stamps" ] || echo_error "Invalid stamps dir $i"
+ for i in $stamps; do
+ [ -d "$i" ] || echo_error "Invalid stamps dir $i"
+ done
+ shift
+ ;;
+ --verbose|-v)
+ verbose="-v"
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ *)
+ echo "Invalid arguments $*"
+ echo_error "Try 'sstate-cache-management.sh -h' for more information."
+ ;;
+ esac
+done
+
+# Determine the sstate cache directory: use --cache-dir if given, otherwise
+# fall back to the SSTATE_CACHE_DIR environment variable; abort if neither.
+[ -n "$cache_dir" ] || cache_dir=$SSTATE_CACHE_DIR
+[ -n "$cache_dir" ] || echo_error "No cache dir found!"
+[ -d "$cache_dir" ] || echo_error "Invalid cache directory \"$cache_dir\""
+
+[ -n "$rm_duplicated" -a -n "$stamps" ] && \
+ echo_error "Can not use both --remove-duplicated and --stamps-dir"
+
+[ "$rm_duplicated" = "y" ] && remove_duplicated
+[ -n "$stamps" ] && rm_by_stamps
+[ -z "$rm_duplicated" -a -z "$stamps" ] && \
+ echo "What do you want to do?"
+
diff --git a/scripts/swabber-strace-attach b/scripts/swabber-strace-attach
new file mode 100755
index 000000000..d4f80e4e9
--- /dev/null
+++ b/scripts/swabber-strace-attach
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+import os
+import sys
+
+# Detach from the controlling terminal and parent process by forking twice to daemonize ourselves,
+# then run the command passed as argv[1]. Send log data to argv[2].
+
+pid = os.fork()
+if (pid == 0):
+ os.setsid()
+ pid = os.fork()
+ if (pid != 0):
+ os._exit(0)
+else:
+ sys.exit()
+
+
+# NOTE(review): file() is Python 2 only; under Python 3 this would need open().
+si = file(os.devnull, 'r')
+so = file(sys.argv[2], 'w')
+se = so
+
+# Redirect this process's stdin/stdout/stderr to devnull and the log file
+os.dup2(si.fileno(), sys.stdin.fileno())
+os.dup2(so.fileno(), sys.stdout.fileno())
+os.dup2(se.fileno(), sys.stderr.fileno())
+
+# Run the command and propagate the raw os.system() status via _exit
+ret = os.system(sys.argv[1])
+
+os._exit(ret)
+
diff --git a/scripts/test-reexec b/scripts/test-reexec
new file mode 100755
index 000000000..9eaa96e75
--- /dev/null
+++ b/scripts/test-reexec
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+# Test Script for task re-execution
+#
+# Copyright 2012 Intel Corporation
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# DESCRIPTION
+# This script is intended to address issues for re-execution of
+# tasks. The test results are saved in ./reexeclogs. Force build
+# logs are saved with prefix "force". Build failure logs are saved with
+# prefix "failed". Log files with prefix "initial" are used to save
+# initial build logs for each recipe. Log files with prefix "clean" are
+# used to save logs of clean task after testing for a recipe is finished.
+#
+
+targets=`bitbake -s | cut -d " " -f 1`
+
+LOGS=./reexeclogs
+
+mkdir -p $LOGS
+
+# Remove the sstate archive files of the specified recipe from SSTATE_DIR
+function clearsstate {
+ target=$1
+
+ sstate_dir=`bitbake $target -e | grep "^SSTATE_DIR" | cut -d "\"" -f 2`
+ sstate_pkgspec=`bitbake $target -e | grep "^SSTATE_PKGSPEC" | cut -d "\"" -f 2`
+ sstasks=`bitbake $target -e | grep "^SSTATETASKS" | cut -d "\"" -f 2`
+
+ for sstask in $sstasks
+ do
+ # Strip the leading "do_" prefix from the task name
+ sstask=${sstask:3}
+ case $sstask in
+ populate_sysroot) sstask="populate-sysroot"
+ ;;
+ populate_lic) sstask="populate-lic"
+ ;;
+ package_write_ipk) sstask="deploy-ipk"
+ ;;
+ package_write_deb) sstask="deploy-deb"
+ ;;
+ package_write_rpm) sstask="deploy-rpm"
+ ;;
+ package) sstask="package"
+ ;;
+ deploy) sstask="deploy"
+ ;;
+ *)
+ ;;
+ esac
+
+ echo "Removing ${sstate_dir}/${sstate_pkgspec}*_${sstask}.tgz* for $target"
+ rm -rf ${sstate_dir}/${sstate_pkgspec}*_${sstask}.tgz*
+ done
+}
+
+# Force re-execution of the specified task of a recipe and log the results
+function testit {
+ target=$1
+ task=$2
+
+ task=`echo $task | sed 's/_setscene//'`
+
+ if [ -f $LOGS/force.$target.$task ]; then
+ return
+ fi
+
+ # Skip tasks which are not meaningful to force re-execute
+ case $task in
+ clean|build|cleansstate|cleanall|package|cleansstate2|package_write|package_write_ipk|package_write_rpm|package_write_deb|fetch|populate_lic) return;;
+ fetchall|devshell|buildall|listtasks|checkuri|checkuriall) return;;
+ esac
+
+ echo "Attempting target $target, task $task"
+ echo "Initial build"
+ bitbake $target -c cleansstate > $LOGS/initial.$target.$task
+ bitbake $target >> $LOGS/initial.$target.$task
+ clearsstate $target >> $LOGS/initial.$target.$task
+ echo "Re-execution build"
+ bitbake $target -c $task -f > $LOGS/force.$target.$task
+ if [ "$?" != 0 ]; then
+ echo "FAILURE for $target $task"
+ cp $LOGS/force.$target.$task $LOGS/failed.$target.$task
+ bitbake $target -c clean > $LOGS/clean.$target.$task
+ else
+ bitbake $target >> $LOGS/force.$target.$task
+ if [ "$?" != 0 ]; then
+ echo "FAILURE2 for $target $task"
+ cp $LOGS/force.$target.$task $LOGS/failed.$target.$task
+ bitbake $target -c clean > $LOGS/clean.$target.$task
+ fi
+ fi
+ echo "Done"
+}
+
+# Iterate over every recipe and each of its tasks, re-executing them in turn
+# and recording logs under $LOGS
+for target in $targets; do
+ # Skip bitbake status/log lines that appear in the "bitbake -s" output
+ case $target in
+ Summary*|WARNING*|Loading*|Loaded*|Package*|=====*) continue;;
+ esac
+ tasks=`bitbake $target -c listtasks | grep ^do_ | sed s/do_//`
+ for task in $tasks; do
+ testit $target $task
+ done
+done
+
+